xref: /linux/net/ipv4/tcp_output.c (revision e086101b150ae8e99e54ab26101ef3835fa9f48d)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <trace/events/tcp.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of four TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 262144;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if the window was not shrunk or the amount shrunk was less than one
 * window scaling factor due to loss of precision.
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window".  This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
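
/* Worked example (illustrative, not part of this file): with cwnd = 40
 * segments, restart_cwnd = 10 and icsk_rto = 200 ms, an idle period of
 * delta = 650 ms halves cwnd twice (650-200 > 0, then 450-200 > 0) and
 * stops, leaving tp->snd_cwnd = max(10, 10) = 10.  A minimal sketch of
 * the same decay as a free-standing helper, assuming millisecond inputs:
 */
#if 0
static unsigned int cwnd_after_idle(unsigned int cwnd,
				    unsigned int restart_cwnd,
				    int rto_ms, int delta_ms)
{
	/* Halve cwnd once per full RTO of idle time, floor at restart_cwnd */
	while ((delta_ms -= rto_ms) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}
/* cwnd_after_idle(40, 10, 200, 650) == 10 */
#endif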

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If it is a reply within ATO of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
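
/* Worked example (illustrative, not part of this file): TCP_INIT_CWND is
 * 10, so init_rwnd starts at 20 segments.  For a jumbo mss = 9000:
 *   max((1460 * 20) / 9000, 2U) = max(3, 2) = 3 segments,
 * which keeps the initial window near 20 * 1460 bytes.  For any
 * mss <= 1460 the result stays 20.
 */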

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
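
/* Worked example (illustrative, not part of this file): with a 4 MB
 * receive buffer, the loop above picks the smallest shift that fits the
 * window in the 16-bit field: 4194304 >> 7 = 32768 <= U16_MAX, while a
 * shift of 6 still leaves 65536 > U16_MAX, so *rcv_wscale ends up as 7.
 * A minimal sketch of just that computation:
 */
#if 0
static unsigned char wscale_for(unsigned int space)	/* space in bytes */
{
	unsigned char ws = 0;

	while (space > 65535 && ws < 14) {	/* U16_MAX, TCP_MAX_WSCALE */
		space >>= 1;
		ws++;
	}
	return ws;	/* wscale_for(4 << 20) == 7 */
}
#endif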

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
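
/* Worked example (illustrative, not part of this file): with
 * rcv_wscale = 7 the peer reconstructs the window as th->window << 7, so
 * a selected window of 1048576 bytes goes on the wire as 8192.  The
 * ALIGN() above rounds the kept window up to the scale granularity, e.g.
 * with rcv_wscale = 2, ALIGN(10, 1 << 2) == 12, so the shifted advertised
 * edge never moves backwards.
 */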

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}
}
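
/* Worked example (illustrative, not part of this file): for a pure
 * timestamp option the word built above is
 *   (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 *   (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP
 * = (1 << 24) | (1 << 16) | (8 << 8) | 10 = 0x0101080a,
 * i.e. the bytes 01 01 08 0a commonly seen on the wire just before the
 * two 32-bit timestamp values.
 */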

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
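
/* Worked example (illustrative, not part of this file): MAX_TCP_OPTION_SPACE
 * is 40 bytes.  A typical SYN without MD5 spends 4 (MSS) + 12 (TS) +
 * 4 (WSCALE) = 20 bytes; SACK_PERM rides inside the timestamp block, so
 * 20 bytes remain.  A maximal 16-byte Fast Open cookie needs
 * (2 + 16 + 3) & ~3 = 20 bytes and therefore just fits.
 */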

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
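
/* Worked example (illustrative, not part of this file): with timestamps
 * on, size starts at 12, so remaining = 40 - 12 = 28 and at most
 * (28 - 4) / 8 = 3 SACK blocks fit.  Three blocks bring size to
 * 12 + 4 + 3 * 8 = 40, exactly MAX_TCP_OPTION_SPACE.
 */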

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from the skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp))
			tcp_xmit_retransmit_queue(sk);

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		if (!sk->sk_lock.owned &&
		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
			bh_lock_sock(sk);
			if (!sock_owned_by_user(sk)) {
				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
				tcp_tsq_handler(sk);
			}
			bh_unlock_sock(sk);
		}

		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED)
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but early release socket ownership
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
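
/* A minimal sketch (illustrative, not part of this file) of the lock-free
 * claim performed by the cmpxchg loop above: atomically clear every
 * deferred-work bit and hand the previously-set ones to the caller, so
 * each pending handler runs exactly once.
 */
#if 0
static unsigned long claim_deferred(unsigned long *p, unsigned long mask)
{
	unsigned long old, new;

	do {
		old = *p;			/* snapshot current flags */
		if (!(old & mask))
			return 0;		/* nothing pending, skip atomic */
		new = old & ~mask;		/* clear all deferred bits */
	} while (cmpxchg(p, old, new) != old);	/* retry if flags changed */

	return old & mask;			/* bits the caller must service */
}
#endif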

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under hard irq.
 * We cannot call the TCP stack right away.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;
	unsigned long nval, oval;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (oval & TSQF_QUEUED)
			break;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
			break;
		/* queue this socket to tasklet queue */
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		break;
	}
	return HRTIMER_NORESTART;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * The sch_fq packet scheduler handles pacing efficiently,
 * but is not always installed/used.
 * Return true if the TCP stack should pace packets itself.
 */
static bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
{
	u64 len_ns;
	u32 rate;

	if (!tcp_needs_internal_pacing(sk))
		return;
	rate = sk->sk_pacing_rate;
	if (!rate || rate == ~0U)
		return;

	/* Should account for header sizes as sch_fq does,
	 * but let's keep things simple.
	 */
	len_ns = (u64)skb->len * NSEC_PER_SEC;
	do_div(len_ns, rate);
	hrtimer_start(&tcp_sk(sk)->pacing_timer,
		      ktime_add_ns(ktime_get(), len_ns),
		      HRTIMER_MODE_ABS_PINNED);
}
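
/* Worked example (illustrative, not part of this file): a 1500-byte skb
 * at sk_pacing_rate = 125000000 bytes/sec (~1 Gbit/s) arms the timer
 *   len_ns = 1500 * NSEC_PER_SEC / 125000000 = 12000 ns
 * into the future, i.e. a 12 usec gap between segment transmissions.
 */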

static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
{
	skb->skb_mstamp = tp->tcp_mstamp;
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
983e2080072SEric Dumazet 
9841da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
9851da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
9861da177e4SLinus Torvalds  * transmission and possible later retransmissions.
9871da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
9881da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
9891da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
9901da177e4SLinus Torvalds  * device.
9911da177e4SLinus Torvalds  *
9921da177e4SLinus Torvalds  * We are working here with either a clone of the original
9931da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
9941da177e4SLinus Torvalds  */
995056834d9SIlpo Järvinen static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
996056834d9SIlpo Järvinen 			    gfp_t gfp_mask)
9971da177e4SLinus Torvalds {
9986687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
999dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
1000dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
1001dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
100233ad798cSAdam Langley 	struct tcp_out_options opts;
100395c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
10048c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1005cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10061da177e4SLinus Torvalds 	struct tcphdr *th;
10071da177e4SLinus Torvalds 	int err;
10081da177e4SLinus Torvalds 
1009dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
10106f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
1011dfb4b9dcSDavid S. Miller 
1012ccdbb6e9SEric Dumazet 	if (clone_it) {
10136f094b9eSLawrence Brakmo 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
10146f094b9eSLawrence Brakmo 			- tp->snd_una;
10158c72c65bSEric Dumazet 		oskb = skb;
1016e2080072SEric Dumazet 
1017e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1018e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1019e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1020dfb4b9dcSDavid S. Miller 			else
1021e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1022e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1023e2080072SEric Dumazet 
1024dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1025dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1026dfb4b9dcSDavid S. Miller 	}
10278c72c65bSEric Dumazet 	skb->skb_mstamp = tp->tcp_mstamp;
1028dfb4b9dcSDavid S. Miller 
1029dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1030dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
103133ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10321da177e4SLinus Torvalds 
10334de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
103433ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
103533ad798cSAdam Langley 	else
103633ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
103733ad798cSAdam Langley 							   &md5);
103833ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10391da177e4SLinus Torvalds 
1040547669d4SEric Dumazet 	/* If no packet is in the qdisc/device queue, allow XPS to select
1041b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
1042b2532eb9SEric Dumazet 	 * which holds one reference to sk_wmem_alloc.
1043b2532eb9SEric Dumazet 	 *
1044b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1045b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1046547669d4SEric Dumazet 	 */
1047b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10481da177e4SLinus Torvalds 
104938ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
105038ab52e8SEric Dumazet 	 * this might cause drops if the packet is looped back:
105138ab52e8SEric Dumazet 	 * the other socket might not have SOCK_MEMALLOC.
105238ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
105338ab52e8SEric Dumazet 	 */
105438ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
105538ab52e8SEric Dumazet 
1056aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1057aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
105846d3ceabSEric Dumazet 
105946d3ceabSEric Dumazet 	skb_orphan(skb);
106046d3ceabSEric Dumazet 	skb->sk = sk;
10611d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1062b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
106314afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
10641da177e4SLinus Torvalds 
1065c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1066c3a2e837SJulian Anastasov 
10671da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1068ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1069c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1070c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10711da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10721da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
1073df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10744de075e0SEric Dumazet 					tcb->tcp_flags);
1075dfb4b9dcSDavid S. Miller 
10761da177e4SLinus Torvalds 	th->check		= 0;
10771da177e4SLinus Torvalds 	th->urg_ptr		= 0;
10781da177e4SLinus Torvalds 
107933f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a window probe below snd_una */
10807691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
10817691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
10821da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
10831da177e4SLinus Torvalds 			th->urg = 1;
10847691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
10850eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
10867691367dSHerbert Xu 			th->urg = 1;
10877691367dSHerbert Xu 		}
10881da177e4SLinus Torvalds 	}
10891da177e4SLinus Torvalds 
1090bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
109151466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1092ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1093ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1094ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1095ea1627c2SEric Dumazet 	} else {
1096ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1097ea1627c2SEric Dumazet 		 * is never scaled.
1098ea1627c2SEric Dumazet 		 */
1099ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1100ea1627c2SEric Dumazet 	}
1101cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1102cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1103cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1104a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1105bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
110639f8e58eSEric Dumazet 					       md5, sk, skb);
1107cfb6eeb4SYOSHIFUJI Hideaki 	}
1108cfb6eeb4SYOSHIFUJI Hideaki #endif
1109cfb6eeb4SYOSHIFUJI Hideaki 
1110bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11111da177e4SLinus Torvalds 
11124de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1113fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
11141da177e4SLinus Torvalds 
1115a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1116cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1117a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1118218af599SEric Dumazet 		tcp_internal_pacing(sk, skb);
1119a44d6eacSMartin KaFai Lau 	}
11201da177e4SLinus Torvalds 
1121bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1122aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1123aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11241da177e4SLinus Torvalds 
11252efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1126f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1127cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1128f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1129cd7d8498SEric Dumazet 
11307faee5c0SEric Dumazet 	/* Our usage of tstamp should remain private */
11312456e855SThomas Gleixner 	skb->tstamp = 0;
1132971f10ecSEric Dumazet 
1133971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1134971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1135971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1136971f10ecSEric Dumazet 
1137b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11387faee5c0SEric Dumazet 
11398c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11405ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11418c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11428c72c65bSEric Dumazet 	}
1143fc225799SEric Dumazet 	if (!err && oskb) {
1144e2080072SEric Dumazet 		tcp_update_skb_after_send(tp, oskb);
1145fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1146fc225799SEric Dumazet 	}
11478c72c65bSEric Dumazet 	return err;
11481da177e4SLinus Torvalds }
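/*
 * [Editor's illustration, not part of the kernel source] The write to
 * *(((__be16 *)th) + 6) above fills the 7th 16-bit word of the TCP
 * header: the 4-bit data offset (header length in 32-bit words) sits in
 * the top nibble, followed by the flag bits. For a 32-byte header
 * (20 base + 12 bytes of options) carrying PSH|ACK (0x18):
 */
static unsigned short doff_flags_word(unsigned int tcp_header_size,
				      unsigned char flags)
{
	/* (32 >> 2) = 8 words; 8 << 12 = 0x8000; 0x8000 | 0x18 = 0x8018 */
	return ((tcp_header_size >> 2) << 12) | flags;
}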
11491da177e4SLinus Torvalds 
115067edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11511da177e4SLinus Torvalds  *
11521da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11531da177e4SLinus Torvalds  * otherwise the socket can stall.
11541da177e4SLinus Torvalds  */
11551da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11561da177e4SLinus Torvalds {
11571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11581da177e4SLinus Torvalds 
11591da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11601da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1161f4a775d1SEric Dumazet 	__skb_header_release(skb);
1162fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11633ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11643ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11651da177e4SLinus Torvalds }
11661da177e4SLinus Torvalds 
116767edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
11685bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1169f6302d1dSDavid S. Miller {
11708f26fb1cSEric Dumazet 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1171f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1172f6302d1dSDavid S. Miller 		 * non-TSO case.
1173f6302d1dSDavid S. Miller 		 */
1174cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1175f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1176f6302d1dSDavid S. Miller 	} else {
1177cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1178f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
11791da177e4SLinus Torvalds 	}
11801da177e4SLinus Torvalds }
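/*
 * [Editor's illustration, not part of the kernel source] The pcount set
 * above is a ceiling division: a 4000-byte skb with mss_now = 1460 gets
 * DIV_ROUND_UP(4000, 1460) = 3 segments (1460 + 1460 + 1080).
 */
static unsigned int tso_pcount(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* DIV_ROUND_UP(len, mss) */
}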
11811da177e4SLinus Torvalds 
118291fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
118368f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
118491fed7a1SIlpo Järvinen  */
1185cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
118691fed7a1SIlpo Järvinen 				   int decr)
118791fed7a1SIlpo Järvinen {
1188a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1189a47e5a98SIlpo Järvinen 
1190dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
119191fed7a1SIlpo Järvinen 		return;
119291fed7a1SIlpo Järvinen 
11936859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
119491fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
119591fed7a1SIlpo Järvinen }
119691fed7a1SIlpo Järvinen 
1197797108d1SIlpo Järvinen /* When the pcount of an skb in the middle of the write queue changes,
1198797108d1SIlpo Järvinen  * we need to do various tweaks to fix the counters.
1199797108d1SIlpo Järvinen  */
1200cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1201797108d1SIlpo Järvinen {
1202797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1203797108d1SIlpo Järvinen 
1204797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1205797108d1SIlpo Järvinen 
1206797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1207797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1208797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1209797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1210797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1211797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1212797108d1SIlpo Järvinen 
1213797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1214797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1215797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1216797108d1SIlpo Järvinen 
1217797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1218797108d1SIlpo Järvinen 
1219797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1220797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
122152cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1222797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1223797108d1SIlpo Järvinen 
1224797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1225797108d1SIlpo Järvinen }
1226797108d1SIlpo Järvinen 
12270a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12280a2cf20cSSoheil Hassas Yeganeh {
12290a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12300a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12310a2cf20cSSoheil Hassas Yeganeh }
12320a2cf20cSSoheil Hassas Yeganeh 
1233490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1234490cc7d0SWillem de Bruijn {
1235490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1236490cc7d0SWillem de Bruijn 
12370a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1238490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1239490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1240490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1241490cc7d0SWillem de Bruijn 
1242490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1243490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1244490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1245b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1246b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1247490cc7d0SWillem de Bruijn 	}
1248490cc7d0SWillem de Bruijn }
1249490cc7d0SWillem de Bruijn 
1250a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1251a166140eSMartin KaFai Lau {
1252a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1253a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1254a166140eSMartin KaFai Lau }
1255a166140eSMartin KaFai Lau 
125675c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
125775c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
125875c119afSEric Dumazet 					 struct sk_buff *buff,
125975c119afSEric Dumazet 					 struct sock *sk,
126075c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
126175c119afSEric Dumazet {
126275c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
126375c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
126475c119afSEric Dumazet 	else
126575c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
126675c119afSEric Dumazet }
126775c119afSEric Dumazet 
12681da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12691da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12701da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12711da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12721da177e4SLinus Torvalds  */
127375c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
127475c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12756cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12761da177e4SLinus Torvalds {
12771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12781da177e4SLinus Torvalds 	struct sk_buff *buff;
12796475be16SDavid S. Miller 	int nsize, old_factor;
1280b60b49eaSHerbert Xu 	int nlen;
12819ce01461SIlpo Järvinen 	u8 flags;
12821da177e4SLinus Torvalds 
12832fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12842fceec13SIlpo Järvinen 		return -EINVAL;
12856a438bbeSStephen Hemminger 
12861da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
12871da177e4SLinus Torvalds 	if (nsize < 0)
12881da177e4SLinus Torvalds 		nsize = 0;
12891da177e4SLinus Torvalds 
12906cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
12911da177e4SLinus Torvalds 		return -ENOMEM;
12921da177e4SLinus Torvalds 
12931da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1294eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
129551456b29SIan Morris 	if (!buff)
12961da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1297ef5cb973SHerbert Xu 
12983ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
12993ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1300b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1301b60b49eaSHerbert Xu 	buff->truesize += nlen;
1302b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13051da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13061da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13071da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13081da177e4SLinus Torvalds 
13091da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13104de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13114de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13124de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1313e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1314a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13151da177e4SLinus Torvalds 
131684fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
13171da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1318056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1319056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
13201da177e4SLinus Torvalds 						       nsize, 0);
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 		skb_trim(skb, len);
13231da177e4SLinus Torvalds 
13241da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
13251da177e4SLinus Torvalds 	} else {
132684fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
13271da177e4SLinus Torvalds 		skb_split(skb, buff, len);
13281da177e4SLinus Torvalds 	}
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
13311da177e4SLinus Torvalds 
1332a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1333490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13341da177e4SLinus Torvalds 
13356475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13366475be16SDavid S. Miller 
13371da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13385bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13395bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13401da177e4SLinus Torvalds 
1341b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1342b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1343b9f64820SYuchung Cheng 
13446475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13456475be16SDavid S. Miller 	 * adjust the various packet counters.
13466475be16SDavid S. Miller 	 */
1347cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13486475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13496475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13501da177e4SLinus Torvalds 
1351797108d1SIlpo Järvinen 		if (diff)
1352797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13531da177e4SLinus Torvalds 	}
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1356f4a775d1SEric Dumazet 	__skb_header_release(buff);
135775c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1358e2080072SEric Dumazet 	list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13591da177e4SLinus Torvalds 
13601da177e4SLinus Torvalds 	return 0;
13611da177e4SLinus Torvalds }
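/*
 * [Editor's illustration, not part of the kernel source] The sequence
 * fix-up above keeps [seq, end_seq) exactly partitioned: splitting an skb
 * covering [1000, 4000) at len = 1460 leaves skb as [1000, 2460) and buff
 * as [2460, 4000), with PSH/FIN moved to buff since only the tail of the
 * original data may carry them. A sketch of the bookkeeping:
 */
struct seq_range { unsigned int seq, end_seq; };

static void split_seq(struct seq_range *orig, struct seq_range *buff,
		      unsigned int len)
{
	buff->seq     = orig->seq + len;	/* new segment starts at the cut */
	buff->end_seq = orig->end_seq;		/* and inherits the old end */
	orig->end_seq = buff->seq;		/* original now ends at the cut */
}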
13621da177e4SLinus Torvalds 
1363f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1364f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13651da177e4SLinus Torvalds  */
13667162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13671da177e4SLinus Torvalds {
13687b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13691da177e4SLinus Torvalds 	int i, k, eat;
13701da177e4SLinus Torvalds 
13714fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13724fa48bf3SEric Dumazet 	if (eat) {
13734fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13744fa48bf3SEric Dumazet 		len -= eat;
13754fa48bf3SEric Dumazet 		if (!len)
13767162fb24SEric Dumazet 			return 0;
13774fa48bf3SEric Dumazet 	}
13781da177e4SLinus Torvalds 	eat = len;
13791da177e4SLinus Torvalds 	k = 0;
13807b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13817b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13827b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
13839e903e08SEric Dumazet 
13849e903e08SEric Dumazet 		if (size <= eat) {
1385aff65da0SIan Campbell 			skb_frag_unref(skb, i);
13869e903e08SEric Dumazet 			eat -= size;
13871da177e4SLinus Torvalds 		} else {
13887b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
13891da177e4SLinus Torvalds 			if (eat) {
13907b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
13917b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
13921da177e4SLinus Torvalds 				eat = 0;
13931da177e4SLinus Torvalds 			}
13941da177e4SLinus Torvalds 			k++;
13951da177e4SLinus Torvalds 		}
13961da177e4SLinus Torvalds 	}
13977b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
13981da177e4SLinus Torvalds 
13991da177e4SLinus Torvalds 	skb->data_len -= len;
14001da177e4SLinus Torvalds 	skb->len = skb->data_len;
14017162fb24SEric Dumazet 	return len;
14021da177e4SLinus Torvalds }
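/*
 * [Editor's illustration, not part of the kernel source] Example: trimming
 * len = 2000 from an skb with 500 linear bytes and frags of 1000, 1000 and
 * 400 bytes. The 500 linear bytes are pulled first; frag 0 (1000) is then
 * dropped, frag 1 loses its first 500 bytes via page_offset, frag 2 is
 * kept, and the function returns 1500 -- the bytes eaten from frag space,
 * which the caller below uses as the truesize delta.
 */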
14031da177e4SLinus Torvalds 
140467edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14051da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14061da177e4SLinus Torvalds {
14077162fb24SEric Dumazet 	u32 delta_truesize;
14087162fb24SEric Dumazet 
140914bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14101da177e4SLinus Torvalds 		return -ENOMEM;
14111da177e4SLinus Torvalds 
14127162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14131da177e4SLinus Torvalds 
14141da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
141584fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14161da177e4SLinus Torvalds 
14177162fb24SEric Dumazet 	if (delta_truesize) {
14187162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14197162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14207162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14211da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14227162fb24SEric Dumazet 	}
14231da177e4SLinus Torvalds 
14245b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14251da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14265bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14271da177e4SLinus Torvalds 
14281da177e4SLinus Torvalds 	return 0;
14291da177e4SLinus Torvalds }
14301da177e4SLinus Torvalds 
14311b63edd6SYuchung Cheng /* Calculate MSS without accounting for any TCP options.  */
14321b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14335d424d5aSJohn Heffner {
1434cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1435cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14365d424d5aSJohn Heffner 	int mss_now;
14375d424d5aSJohn Heffner 
14385d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14395d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr), per RFC 1122.
14405d424d5aSJohn Heffner 	 */
14415d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14425d424d5aSJohn Heffner 
144367469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
144467469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
144567469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
144667469601SEric Dumazet 
144767469601SEric Dumazet 		if (dst && dst_allfrag(dst))
144867469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
144967469601SEric Dumazet 	}
145067469601SEric Dumazet 
14515d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14525d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14535d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14545d424d5aSJohn Heffner 
14555d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14565d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14575d424d5aSJohn Heffner 
14585d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14595d424d5aSJohn Heffner 	if (mss_now < 48)
14605d424d5aSJohn Heffner 		mss_now = 48;
14615d424d5aSJohn Heffner 	return mss_now;
14625d424d5aSJohn Heffner }
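/*
 * [Editor's illustration, not part of the kernel source] For a plain IPv4
 * path with pmtu = 1500: mss_now = 1500 - 20 (IP) - 20 (TCP) = 1460, then
 * clamped to rx_opt.mss_clamp, reduced by any extension headers, and
 * floored at 48 so a full option set plus 8 data bytes always fit. A
 * simplified model (ignoring the IPv6 frag-header case above):
 */
static int mtu_to_mss_sketch(int pmtu, int net_hdr_len, int ext_hdr_len,
			     int mss_clamp)
{
	int mss = pmtu - net_hdr_len - 20;	/* 20 == sizeof(struct tcphdr) */

	if (mss > mss_clamp)
		mss = mss_clamp;
	mss -= ext_hdr_len;
	return mss < 48 ? 48 : mss;
}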
14635d424d5aSJohn Heffner 
14641b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14651b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14661b63edd6SYuchung Cheng {
14671b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14681b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14691b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14701b63edd6SYuchung Cheng }
14711b63edd6SYuchung Cheng 
14725d424d5aSJohn Heffner /* Inverse of above */
147367469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14745d424d5aSJohn Heffner {
1475cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1476cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14775d424d5aSJohn Heffner 	int mtu;
14785d424d5aSJohn Heffner 
14795d424d5aSJohn Heffner 	mtu = mss +
14805d424d5aSJohn Heffner 	      tp->tcp_header_len +
14815d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14825d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14835d424d5aSJohn Heffner 
148467469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
148567469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
148667469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
148767469601SEric Dumazet 
148867469601SEric Dumazet 		if (dst && dst_allfrag(dst))
148967469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
149067469601SEric Dumazet 	}
14915d424d5aSJohn Heffner 	return mtu;
14925d424d5aSJohn Heffner }
1493556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
14945d424d5aSJohn Heffner 
149567edfef7SAndi Kleen /* MTU probing init per socket */
14965d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14975d424d5aSJohn Heffner {
14985d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
14995d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1500b0f9ca53SFan Du 	struct net *net = sock_net(sk);
15015d424d5aSJohn Heffner 
1502b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15035d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15045d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1505b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15065d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
150705cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1508c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15095d424d5aSJohn Heffner }
15104bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15115d424d5aSJohn Heffner 
15121da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
15151da177e4SLinus Torvalds    NOT account for TCP options, but includes only the bare TCP header.
15161da177e4SLinus Torvalds 
15171da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1518caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
15191da177e4SLinus Torvalds    It also does not include TCP options.
15201da177e4SLinus Torvalds 
1521d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15221da177e4SLinus Torvalds 
15231da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15241da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15251da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15261da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15291da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15301da177e4SLinus Torvalds 
1531d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1532d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15331da177e4SLinus Torvalds  */
15341da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15351da177e4SLinus Torvalds {
15361da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1537d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15385d424d5aSJohn Heffner 	int mss_now;
15391da177e4SLinus Torvalds 
15405d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15415d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15421da177e4SLinus Torvalds 
15435d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1544409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds 	/* And store cached results */
1547d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15485d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15495d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1550c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15511da177e4SLinus Torvalds 
15521da177e4SLinus Torvalds 	return mss_now;
15531da177e4SLinus Torvalds }
15544bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15571da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15581da177e4SLinus Torvalds  */
15590c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15601da177e4SLinus Torvalds {
1561cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1562cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1563c1b4a7e6SDavid S. Miller 	u32 mss_now;
156495c96174SEric Dumazet 	unsigned int header_len;
156533ad798cSAdam Langley 	struct tcp_out_options opts;
156633ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15671da177e4SLinus Torvalds 
1568c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1569c1b4a7e6SDavid S. Miller 
15701da177e4SLinus Torvalds 	if (dst) {
15711da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1572d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15731da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15741da177e4SLinus Torvalds 	}
15751da177e4SLinus Torvalds 
157633ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
157733ad798cSAdam Langley 		     sizeof(struct tcphdr);
157833ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
157933ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
158033ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
158133ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
158233ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
158333ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
158433ad798cSAdam Langley 		mss_now -= delta;
158533ad798cSAdam Langley 	}
1586cfb6eeb4SYOSHIFUJI Hideaki 
15871da177e4SLinus Torvalds 	return mss_now;
15881da177e4SLinus Torvalds }
15891da177e4SLinus Torvalds 
159086fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
159186fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
159286fd14adSWeiping Pan  * or if the application hit its sndbuf limit recently.
159386fd14adSWeiping Pan  */
159486fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1595a762a980SDavid S. Miller {
15969e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1597a762a980SDavid S. Miller 
159886fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
159986fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
160086fd14adSWeiping Pan 		/* Limited by application or receiver window. */
160186fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
160286fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
160386fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
160486fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
160586fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
160686fd14adSWeiping Pan 		}
160786fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
160886fd14adSWeiping Pan 	}
1609c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
161086fd14adSWeiping Pan }
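/*
 * [Editor's illustration, not part of the kernel source] The decay above
 * halves the gap between the configured and the actually used window:
 * with snd_cwnd = 40 and win_used = 10, the new cwnd is (40 + 10) >> 1
 * = 25, following RFC 2861's advice for application-limited periods.
 */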
161186fd14adSWeiping Pan 
1612ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1613a762a980SDavid S. Miller {
16141b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1615a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1616a762a980SDavid S. Miller 
1617ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1618ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1619ca8a2263SNeal Cardwell 	 */
1620ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1621ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1622ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1623ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1624ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1625ca8a2263SNeal Cardwell 	}
1626e114a710SEric Dumazet 
162724901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1628a762a980SDavid S. Miller 		/* Network is fed fully. */
1629a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1630c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1631a762a980SDavid S. Miller 	} else {
1632a762a980SDavid S. Miller 		/* Network starves. */
1633a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1634a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1635a762a980SDavid S. Miller 
163615d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
1637c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16381b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1639a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1640b0f71bd3SFrancis Yan 
1641b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1642b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1643b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1644b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
164575c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1646b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1647b0f71bd3SFrancis Yan 		 */
164875c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1649b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1650b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1651b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1652a762a980SDavid S. Miller 	}
1653a762a980SDavid S. Miller }
1654a762a980SDavid S. Miller 
1655d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1656d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1657d4589926SEric Dumazet {
1658d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1659d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1660d4589926SEric Dumazet }
1661d4589926SEric Dumazet 
1662d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1663d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1664d4589926SEric Dumazet  * The test is really:
1665d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1666d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1667d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1668d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16690e3a4803SIlpo Järvinen  */
1670d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1671d4589926SEric Dumazet 				const struct sk_buff *skb)
1672d4589926SEric Dumazet {
1673d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1674d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1675d4589926SEric Dumazet }
1676d4589926SEric Dumazet 
1677d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1678d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1679d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1680d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1681d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1682d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1683d4589926SEric Dumazet  */
1684d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1685cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1686d4589926SEric Dumazet {
1687d4589926SEric Dumazet 	return partial &&
1688d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1689d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1690d4589926SEric Dumazet }
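/*
 * [Editor's summary, not part of the kernel source] In words: a sub-mss
 * tail is held back either when TCP_CORK is set, or when Nagle is enabled
 * (no TCP_NODELAY), data is in flight, and a previously sent small packet
 * is still unacknowledged (Minshall's refinement via tcp_minshall_check()).
 */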
1691605ad7f1SEric Dumazet 
1692605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1693605ad7f1SEric Dumazet  * to send one TSO packet per ms
1694605ad7f1SEric Dumazet  */
16951b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
16961b3878caSNeal Cardwell 		     int min_tso_segs)
1697605ad7f1SEric Dumazet {
1698605ad7f1SEric Dumazet 	u32 bytes, segs;
1699605ad7f1SEric Dumazet 
1700605ad7f1SEric Dumazet 	bytes = min(sk->sk_pacing_rate >> 10,
1701605ad7f1SEric Dumazet 		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1702605ad7f1SEric Dumazet 
1703605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1704605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1705605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1706605ad7f1SEric Dumazet 	 * with tcp_tso_should_defer() heuristic.
1707605ad7f1SEric Dumazet 	 */
17081b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1709605ad7f1SEric Dumazet 
1710605ad7f1SEric Dumazet 	return min_t(u32, segs, sk->sk_gso_max_segs);
1711605ad7f1SEric Dumazet }
17121b3878caSNeal Cardwell EXPORT_SYMBOL(tcp_tso_autosize);
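/*
 * [Editor's illustration, not part of the kernel source] sk_pacing_rate
 * >> 10 approximates bytes-per-millisecond (dividing by 1024 rather than
 * 1000). At 125 MB/s (1 Gbit/s) with mss_now = 1448 and the usual 64 KB
 * GSO limit, bytes = min(122070, ~65 KB) and segs works out to roughly 45,
 * then capped at sk_gso_max_segs.
 */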
1713605ad7f1SEric Dumazet 
1714ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1715ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1716ed6e7268SNeal Cardwell  */
1717ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1718ed6e7268SNeal Cardwell {
1719ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1720ed6e7268SNeal Cardwell 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
1721ed6e7268SNeal Cardwell 
17221b3878caSNeal Cardwell 	return tso_segs ? :
17231b3878caSNeal Cardwell 		tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
1724ed6e7268SNeal Cardwell }
1725ed6e7268SNeal Cardwell 
1726d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1727d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1728d4589926SEric Dumazet 					const struct sk_buff *skb,
1729d4589926SEric Dumazet 					unsigned int mss_now,
1730d4589926SEric Dumazet 					unsigned int max_segs,
1731d4589926SEric Dumazet 					int nonagle)
1732c1b4a7e6SDavid S. Miller {
1733cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1734d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1735c1b4a7e6SDavid S. Miller 
173690840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17371485348dSBen Hutchings 	max_len = mss_now * max_segs;
17380e3a4803SIlpo Järvinen 
17391485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17401485348dSBen Hutchings 		return max_len;
17410e3a4803SIlpo Järvinen 
17425ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17435ea3a748SIlpo Järvinen 
17441485348dSBen Hutchings 	if (max_len <= needed)
17451485348dSBen Hutchings 		return max_len;
17460e3a4803SIlpo Järvinen 
1747d4589926SEric Dumazet 	partial = needed % mss_now;
1748d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1749d4589926SEric Dumazet 	 * to include this last segment in this skb.
1750d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary
1751d4589926SEric Dumazet 	 */
1752cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1753d4589926SEric Dumazet 		return needed - partial;
1754d4589926SEric Dumazet 
1755d4589926SEric Dumazet 	return needed;
1756c1b4a7e6SDavid S. Miller }
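/*
 * [Editor's illustration, not part of the kernel source] Worked example:
 * mss_now = 1460, max_segs = 4 (so max_len = 5840), skb->len = 5000 and
 * window = 4400. needed = min(5000, 4400) = 4400 and partial =
 * 4400 % 1460 = 20; if Nagle forbids sending the 20-byte tail, the split
 * point is 4400 - 20 = 4380, i.e. exactly three full-sized segments.
 */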
1757c1b4a7e6SDavid S. Miller 
1758c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1759c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1760c1b4a7e6SDavid S. Miller  */
1761cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1762cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1763c1b4a7e6SDavid S. Miller {
1764d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1765c1b4a7e6SDavid S. Miller 
1766c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17674de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17684de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1769c1b4a7e6SDavid S. Miller 		return 1;
1770c1b4a7e6SDavid S. Miller 
1771c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1772c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1773d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1774c1b4a7e6SDavid S. Miller 		return 0;
1775d649a7a8SEric Dumazet 
1776d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1777d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1778d649a7a8SEric Dumazet 	 */
1779d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1780d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1781c1b4a7e6SDavid S. Miller }
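/*
 * [Editor's illustration, not part of the kernel source] With snd_cwnd =
 * 10 and in_flight = 7: halfcwnd = max(10 >> 1, 1) = 5 and the result is
 * min(5, 10 - 7) = 3 -- room for three more segments, but never a burst
 * larger than half the congestion window.
 */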
1782c1b4a7e6SDavid S. Miller 
1783b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
178467edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1785c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1786c1b4a7e6SDavid S. Miller  */
17875bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1788c1b4a7e6SDavid S. Miller {
1789c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1790c1b4a7e6SDavid S. Miller 
1791f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
17925bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1793c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1794c1b4a7e6SDavid S. Miller 	}
1795c1b4a7e6SDavid S. Miller 	return tso_segs;
1796c1b4a7e6SDavid S. Miller }
1797c1b4a7e6SDavid S. Miller 
1798c1b4a7e6SDavid S. Miller 
1799a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1800c1b4a7e6SDavid S. Miller  * sent now.
1801c1b4a7e6SDavid S. Miller  */
1802a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1803c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1804c1b4a7e6SDavid S. Miller {
1805c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of
1806c1b4a7e6SDavid S. Miller 	 * the write_queue (they have no chance to get new data).
1807c1b4a7e6SDavid S. Miller 	 *
1808c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1809c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1810c1b4a7e6SDavid S. Miller 	 */
1811c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1812a2a385d6SEric Dumazet 		return true;
1813c1b4a7e6SDavid S. Miller 
18149b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18159b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1816a2a385d6SEric Dumazet 		return true;
1817c1b4a7e6SDavid S. Miller 
1818cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1819a2a385d6SEric Dumazet 		return true;
1820c1b4a7e6SDavid S. Miller 
1821a2a385d6SEric Dumazet 	return false;
1822c1b4a7e6SDavid S. Miller }
1823c1b4a7e6SDavid S. Miller 
1824c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1825a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1826a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1827056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1828c1b4a7e6SDavid S. Miller {
1829c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1830c1b4a7e6SDavid S. Miller 
1831c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1832c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1833c1b4a7e6SDavid S. Miller 
183490840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1835c1b4a7e6SDavid S. Miller }
1836c1b4a7e6SDavid S. Miller 
1837c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1838c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1839c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1840c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1841c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1842c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1843c1b4a7e6SDavid S. Miller  */
184475c119afSEric Dumazet static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
184575c119afSEric Dumazet 			struct sk_buff *skb, unsigned int len,
1846c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1847c1b4a7e6SDavid S. Miller {
1848c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1849c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
18509ce01461SIlpo Järvinen 	u8 flags;
1851c1b4a7e6SDavid S. Miller 
1852c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1853c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
185475c119afSEric Dumazet 		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
1855c1b4a7e6SDavid S. Miller 
1856eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
185751456b29SIan Morris 	if (unlikely(!buff))
1858c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1859c1b4a7e6SDavid S. Miller 
18603ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18613ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1862b60b49eaSHerbert Xu 	buff->truesize += nlen;
1863c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1864c1b4a7e6SDavid S. Miller 
1865c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1866c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1867c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1868c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1869c1b4a7e6SDavid S. Miller 
1870c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18714de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18724de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18734de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1874c1b4a7e6SDavid S. Miller 
1875c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1876c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1877c1b4a7e6SDavid S. Miller 
1878a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1879a166140eSMartin KaFai Lau 
188084fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1881c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1882490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1883c1b4a7e6SDavid S. Miller 
1884c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
18855bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
18865bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1887c1b4a7e6SDavid S. Miller 
1888c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1889f4a775d1SEric Dumazet 	__skb_header_release(buff);
189075c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1891c1b4a7e6SDavid S. Miller 
1892c1b4a7e6SDavid S. Miller 	return 0;
1893c1b4a7e6SDavid S. Miller }
1894c1b4a7e6SDavid S. Miller 
1895c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1896c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1897c1b4a7e6SDavid S. Miller  *
1898c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1899c1b4a7e6SDavid S. Miller  */
1900ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1901605ad7f1SEric Dumazet 				 bool *is_cwnd_limited, u32 max_segs)
1902c1b4a7e6SDavid S. Miller {
19036687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
190450c8339eSEric Dumazet 	u32 age, send_win, cong_win, limit, in_flight;
190550c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
190650c8339eSEric Dumazet 	struct sk_buff *head;
1907ad9f4f50SEric Dumazet 	int win_divisor;
1908c1b4a7e6SDavid S. Miller 
19094de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1910ae8064acSJohn Heffner 		goto send_now;
1911c1b4a7e6SDavid S. Miller 
191299d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1913ae8064acSJohn Heffner 		goto send_now;
1914ae8064acSJohn Heffner 
19155f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing deferral
19165f852eb5SEric Dumazet 	 * only if the last write was recent.
19175f852eb5SEric Dumazet 	 */
1918d635fbe2SEric Dumazet 	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
1919ae8064acSJohn Heffner 		goto send_now;
1920908a75c1SDavid S. Miller 
1921c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1922c1b4a7e6SDavid S. Miller 
1923056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1924c1b4a7e6SDavid S. Miller 
192590840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1926c1b4a7e6SDavid S. Miller 
1927c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1928c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1929c1b4a7e6SDavid S. Miller 
1930c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1931c1b4a7e6SDavid S. Miller 
1932ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1933605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1934ae8064acSJohn Heffner 		goto send_now;
1935ba244fe9SDavid S. Miller 
193662ad2761SIlpo Järvinen 	/* An skb in the middle of the queue gets no more data; fully sendable already? */
193762ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
193862ad2761SIlpo Järvinen 		goto send_now;
193962ad2761SIlpo Järvinen 
1940ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1941ad9f4f50SEric Dumazet 	if (win_divisor) {
1942c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1943c1b4a7e6SDavid S. Miller 
1944c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1945c1b4a7e6SDavid S. Miller 		 * just use it.
1946c1b4a7e6SDavid S. Miller 		 */
1947ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1948c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1949ae8064acSJohn Heffner 			goto send_now;
1950c1b4a7e6SDavid S. Miller 	} else {
1951c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1952c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1953c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1954c1b4a7e6SDavid S. Miller 		 * then send now.
1955c1b4a7e6SDavid S. Miller 		 */
19566b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1957ae8064acSJohn Heffner 			goto send_now;
1958c1b4a7e6SDavid S. Miller 	}
1959c1b4a7e6SDavid S. Miller 
196075c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
196175c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
196275c119afSEric Dumazet 	if (!head)
196375c119afSEric Dumazet 		goto send_now;
19649a568de4SEric Dumazet 	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
196550c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
196650c8339eSEric Dumazet 	if (age < (tp->srtt_us >> 4))
196750c8339eSEric Dumazet 		goto send_now;
196850c8339eSEric Dumazet 
19695f852eb5SEric Dumazet 	/* Ok, it looks like it is advisable to defer. */
1970ae8064acSJohn Heffner 
1971d2e1339fSBendik Rønning Opstad 	if (cong_win < send_win && cong_win <= skb->len)
1972ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1973ca8a2263SNeal Cardwell 
1974a2a385d6SEric Dumazet 	return true;
1975ae8064acSJohn Heffner 
1976ae8064acSJohn Heffner send_now:
1977a2a385d6SEric Dumazet 	return false;
1978c1b4a7e6SDavid S. Miller }
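/*
 * [Editor's illustration, not part of the kernel source] With the default
 * tcp_tso_win_divisor of 3 and min(snd_wnd, snd_cwnd * mss_cache) = 60 KB,
 * chunk = 20 KB: deferral stops as soon as a third of the window is
 * sendable. With the divisor set to 0, the fallback branch instead sends
 * once there is room for more than three full-sized frames.
 */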
1979c1b4a7e6SDavid S. Miller 
198005cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
198105cbc0dbSFan Du {
198205cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
198305cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
198405cbc0dbSFan Du 	struct net *net = sock_net(sk);
198505cbc0dbSFan Du 	u32 interval;
198605cbc0dbSFan Du 	s32 delta;
198705cbc0dbSFan Du 
198805cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
1989c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
199005cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
199105cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
199205cbc0dbSFan Du 
199305cbc0dbSFan Du 		/* Update current search range */
199405cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
199505cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
199605cbc0dbSFan Du 			sizeof(struct tcphdr) +
199705cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
199805cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
199905cbc0dbSFan Du 
200005cbc0dbSFan Du 		/* Update probe time stamp */
2001c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
200205cbc0dbSFan Du 	}
200305cbc0dbSFan Du }
200405cbc0dbSFan Du 
20055d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
200667edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
200767edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
200867edfef7SAndi Kleen  * changes resulting in larger path MTUs.
200967edfef7SAndi Kleen  *
20105d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20115d424d5aSJohn Heffner  *         1 if a probe was sent,
2012056834d9SIlpo Järvinen  *         -1 otherwise
2013056834d9SIlpo Järvinen  */
20145d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20155d424d5aSJohn Heffner {
20165d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
201712a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20185d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20196b58e0a5SFan Du 	struct net *net = sock_net(sk);
20205d424d5aSJohn Heffner 	int probe_size;
202191cc17c0SIlpo Järvinen 	int size_needed;
202212a59abcSEric Dumazet 	int copy, len;
20235d424d5aSJohn Heffner 	int mss_now;
20246b58e0a5SFan Du 	int interval;
20255d424d5aSJohn Heffner 
20265d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20275d424d5aSJohn Heffner 	 * not in recovery,
20285d424d5aSJohn Heffner 	 * have enough cwnd, and
202912a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
203012a59abcSEric Dumazet 	 */
203112a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20325d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20335d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20345d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
203512a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20365d424d5aSJohn Heffner 		return -1;
20375d424d5aSJohn Heffner 
20386b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
20396b58e0a5SFan Du 	 * and current mss_clamp. If (search_high - search_low) is
20406b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20416b58e0a5SFan Du 	 */
20420c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
20436b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20446b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
204591cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
20466b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
204705cbc0dbSFan Du 	/* When misfortune happens, i.e. we are reprobing actively
204805cbc0dbSFan Du 	 * and the reprobe timer has expired, we stick with the current
204905cbc0dbSFan Du 	 * probing process by not resetting the search range to its original.
205005cbc0dbSFan Du 	 */
20516b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
205205cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
205305cbc0dbSFan Du 		/* Check whether enough time has elapsed for
205405cbc0dbSFan Du 		 * another round of probing.
205505cbc0dbSFan Du 		 */
205605cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
20575d424d5aSJohn Heffner 		return -1;
20585d424d5aSJohn Heffner 	}
20595d424d5aSJohn Heffner 
20605d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
20617f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
20625d424d5aSJohn Heffner 		return -1;
20635d424d5aSJohn Heffner 
206491cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
20655d424d5aSJohn Heffner 		return -1;
206690840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
20675d424d5aSJohn Heffner 		return 0;
20685d424d5aSJohn Heffner 
2069d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2070d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2071d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
20725d424d5aSJohn Heffner 			return -1;
20735d424d5aSJohn Heffner 		else
20745d424d5aSJohn Heffner 			return 0;
20755d424d5aSJohn Heffner 	}
20765d424d5aSJohn Heffner 
20775d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2078eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
207951456b29SIan Morris 	if (!nskb)
20805d424d5aSJohn Heffner 		return -1;
20813ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
20823ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
20835d424d5aSJohn Heffner 
2084fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
20855d424d5aSJohn Heffner 
20865d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
20875d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
20884de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
20895d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
20905d424d5aSJohn Heffner 	nskb->csum = 0;
209184fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
20925d424d5aSJohn Heffner 
209350c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
209450c4817eSIlpo Järvinen 
20955d424d5aSJohn Heffner 	len = 0;
2096234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
20975d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
20982fe664f1SDouglas Caetano dos Santos 		if (nskb->ip_summed) {
20995d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
21002fe664f1SDouglas Caetano dos Santos 		} else {
21012fe664f1SDouglas Caetano dos Santos 			__wsum csum = skb_copy_and_csum_bits(skb, 0,
2102056834d9SIlpo Järvinen 							     skb_put(nskb, copy),
21032fe664f1SDouglas Caetano dos Santos 							     copy, 0);
21042fe664f1SDouglas Caetano dos Santos 			nskb->csum = csum_block_add(nskb->csum, csum, len);
21052fe664f1SDouglas Caetano dos Santos 		}
21065d424d5aSJohn Heffner 
21075d424d5aSJohn Heffner 		if (skb->len <= copy) {
21085d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21095d424d5aSJohn Heffner 			 * Throw it away. */
21104de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2111fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21123ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21135d424d5aSJohn Heffner 		} else {
21144de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2115a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21165d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21175d424d5aSJohn Heffner 				skb_pull(skb, copy);
211884fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
2119056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
2120056834d9SIlpo Järvinen 								 skb->len, 0);
21215d424d5aSJohn Heffner 			} else {
21225d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21235bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21245d424d5aSJohn Heffner 			}
21255d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21265d424d5aSJohn Heffner 		}
21275d424d5aSJohn Heffner 
21285d424d5aSJohn Heffner 		len += copy;
2129234b6860SIlpo Järvinen 
2130234b6860SIlpo Järvinen 		if (len >= probe_size)
2131234b6860SIlpo Järvinen 			break;
21325d424d5aSJohn Heffner 	}
21335bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21345d424d5aSJohn Heffner 
21355d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21367faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21377faee5c0SEric Dumazet 	 */
21385d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21395d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21405d424d5aSJohn Heffner 		 * effectively two packets. */
21415d424d5aSJohn Heffner 		tp->snd_cwnd--;
214266f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21435d424d5aSJohn Heffner 
21445d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
21450e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
21460e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
21475d424d5aSJohn Heffner 
21485d424d5aSJohn Heffner 		return 1;
21495d424d5aSJohn Heffner 	}
21505d424d5aSJohn Heffner 
21515d424d5aSJohn Heffner 	return -1;
21525d424d5aSJohn Heffner }
21535d424d5aSJohn Heffner 
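/* Illustrative sketch (not built): how the binary search above picks a
 * probe size.  The midpoint of [search_low, search_high] is converted to
 * an MSS, and (reordering + 1) extra MSS-sized segments must be queued
 * behind the probe so its loss can still be detected.  All values below
 * are hypothetical, not taken from a live socket.
 */
#if 0
static int mtu_probe_size_sketch(int search_low, int search_high,
				 int reordering, int mss_cache)
{
	int probe_mtu = (search_low + search_high) >> 1;
	/* tcp_mtu_to_mss() would subtract IP/TCP headers + options here;
	 * 40 bytes is an assumed v4 header size for illustration only.
	 */
	int probe_size = probe_mtu - 40;
	int size_needed = probe_size + (reordering + 1) * mss_cache;

	return size_needed; /* bytes that must sit in the send queue */
}
#endif
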
2154218af599SEric Dumazet static bool tcp_pacing_check(const struct sock *sk)
2155218af599SEric Dumazet {
2156218af599SEric Dumazet 	return tcp_needs_internal_pacing(sk) &&
2157218af599SEric Dumazet 	       hrtimer_active(&tcp_sk(sk)->pacing_timer);
2158218af599SEric Dumazet }
2159218af599SEric Dumazet 
2160f9616c35SEric Dumazet /* TCP Small Queues:
2161f9616c35SEric Dumazet  * Limit the number of packets in qdisc/device queues to two packets, or ~1 ms.
2162f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2163f9616c35SEric Dumazet  * This allows for :
2164f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2165f9616c35SEric Dumazet  *  - faster recovery
2166f9616c35SEric Dumazet  *  - high rates
2167f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2168f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2169f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2170f9616c35SEric Dumazet  */
2171f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2172f9616c35SEric Dumazet 				  unsigned int factor)
2173f9616c35SEric Dumazet {
2174f9616c35SEric Dumazet 	unsigned int limit;
2175f9616c35SEric Dumazet 
2176f9616c35SEric Dumazet 	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
2177f9616c35SEric Dumazet 	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
2178f9616c35SEric Dumazet 	limit <<= factor;
2179f9616c35SEric Dumazet 
218014afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
218175c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
218375eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back
218475eefc6cSEric Dumazet 		 * after a softirq/tasklet schedule.
218475eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
218575eefc6cSEric Dumazet 		 */
218675c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
218775eefc6cSEric Dumazet 			return false;
218875eefc6cSEric Dumazet 
21897aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2190f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2191f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2192f9616c35SEric Dumazet 		 * test again the condition.
2193f9616c35SEric Dumazet 		 * test the condition again.
2194f9616c35SEric Dumazet 		smp_mb__after_atomic();
219514afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2196f9616c35SEric Dumazet 			return true;
2197f9616c35SEric Dumazet 	}
2198f9616c35SEric Dumazet 	return false;
2199f9616c35SEric Dumazet }
2200f9616c35SEric Dumazet 
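/* Illustrative sketch (not built): the TSQ limit computed above is
 * roughly max(2 skbs, ~1 ms of data at the current pacing rate), capped
 * by sysctl_tcp_limit_output_bytes, then doubled for retransmits
 * (factor == 1).  Example numbers in the comment are hypothetical.
 */
#if 0
static unsigned int tsq_limit_sketch(unsigned int skb_truesize,
				     u32 pacing_rate, /* bytes/sec */
				     unsigned int factor)
{
	/* pacing_rate >> 10 is ~1/1024 s worth of data, i.e. about 1 ms */
	unsigned int limit = max(2 * skb_truesize, pacing_rate >> 10);

	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
	return limit << factor;
	/* e.g. pacing_rate = 125 MB/s (1 Gbit/s) gives ~122 KB,
	 * under the default 262144 byte cap.
	 */
}
#endif
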
220105b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
220205b055e8SFrancis Yan {
2203628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2204efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
220505b055e8SFrancis Yan 
2206efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2207efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
220805b055e8SFrancis Yan 	tp->chrono_start = now;
220905b055e8SFrancis Yan 	tp->chrono_type = new;
221005b055e8SFrancis Yan }
221105b055e8SFrancis Yan 
221205b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
221305b055e8SFrancis Yan {
221405b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
221505b055e8SFrancis Yan 
221605b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
22170f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
22180f87230dSFrancis Yan 	 * over the other conditions. So if something "more interesting"
221905b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
222005b055e8SFrancis Yan 	 */
222105b055e8SFrancis Yan 	if (type > tp->chrono_type)
222205b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
222305b055e8SFrancis Yan }
222405b055e8SFrancis Yan 
222505b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
222605b055e8SFrancis Yan {
222705b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
222805b055e8SFrancis Yan 
22300f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
22310f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
22320f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
22330f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
22340f87230dSFrancis Yan 	 * it's the "most interesting", i.e. current, chrono we are
22350f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
22360f87230dSFrancis Yan 	 */
223775c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
223805b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
22390f87230dSFrancis Yan 	else if (type == tp->chrono_type)
22400f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
224105b055e8SFrancis Yan }
224205b055e8SFrancis Yan 
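/* Illustrative sketch (not built): how start/stop interact given the
 * priority ordering of enum tcp_chrono (UNSPEC < BUSY < RWND_LIMITED <
 * SNDBUF_LIMITED, as defined in include/net/tcp.h).  A higher-priority
 * condition preempts a lower one; when it ends, we fall back to BUSY if
 * data is still pending.
 */
#if 0
	tcp_chrono_start(sk, TCP_CHRONO_BUSY);		/* data queued    */
	tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);	/* peer win full  */
	/* ... elapsed jiffies accrue in the RWND_LIMITED bucket ... */
	tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);	/* back to BUSY   */
#endif
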
22431da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
22441da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
22451da177e4SLinus Torvalds  * window for us.
22461da177e4SLinus Torvalds  *
2247f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, since only frames in the
2248f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up range cannot be large. However, taking into
2249f8269a49SIlpo Järvinen  * account the rare use of URG, this is not a big flaw.
2250f8269a49SIlpo Järvinen  *
22516ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
22526ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
22536ba8a3b1SNandita Dukkipati  *
2254a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
2255a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
22561da177e4SLinus Torvalds  */
2257a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2258d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
22591da177e4SLinus Torvalds {
22601da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
226192df7b51SDavid S. Miller 	struct sk_buff *skb;
2262c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2263c1b4a7e6SDavid S. Miller 	int cwnd_quota;
22645d424d5aSJohn Heffner 	int result;
22655615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2266605ad7f1SEric Dumazet 	u32 max_segs;
22671da177e4SLinus Torvalds 
2268c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
22695d424d5aSJohn Heffner 
2270d5dd9175SIlpo Järvinen 	if (!push_one) {
22715d424d5aSJohn Heffner 		/* Do MTU probing. */
2272d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2273d5dd9175SIlpo Järvinen 		if (!result) {
2274a2a385d6SEric Dumazet 			return false;
22755d424d5aSJohn Heffner 		} else if (result > 0) {
22765d424d5aSJohn Heffner 			sent_pkts = 1;
22775d424d5aSJohn Heffner 		}
2278d5dd9175SIlpo Järvinen 	}
22795d424d5aSJohn Heffner 
2280ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
22819a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
2282fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2283c8ac3774SHerbert Xu 		unsigned int limit;
2284c8ac3774SHerbert Xu 
2285218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2286218af599SEric Dumazet 			break;
2287218af599SEric Dumazet 
22885bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2289c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2290c1b4a7e6SDavid S. Miller 
22919d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
22927faee5c0SEric Dumazet 			/* "skb_mstamp" is used as a start point for the retransmit timer */
2293e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
2294ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
22959d186cacSAndrey Vagin 		}
2296ec342325SAndrew Vagin 
2297b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
22986ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
22996ba8a3b1SNandita Dukkipati 			if (push_one == 2)
23006ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
23016ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23026ba8a3b1SNandita Dukkipati 			else
2303b68e9f85SHerbert Xu 				break;
23046ba8a3b1SNandita Dukkipati 		}
2305b68e9f85SHerbert Xu 
23065615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23075615f886SFrancis Yan 			is_rwnd_limited = true;
2308b68e9f85SHerbert Xu 			break;
23095615f886SFrancis Yan 		}
2310b68e9f85SHerbert Xu 
2311d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2312aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2313aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2314aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2315aa93466bSDavid S. Miller 				break;
2316c1b4a7e6SDavid S. Miller 		} else {
2317ca8a2263SNeal Cardwell 			if (!push_one &&
2318605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2319605ad7f1SEric Dumazet 						 max_segs))
2320aa93466bSDavid S. Miller 				break;
2321c1b4a7e6SDavid S. Miller 		}
2322aa93466bSDavid S. Miller 
2323605ad7f1SEric Dumazet 		limit = mss_now;
2324d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2325605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2326605ad7f1SEric Dumazet 						    min_t(unsigned int,
2327605ad7f1SEric Dumazet 							  cwnd_quota,
2328605ad7f1SEric Dumazet 							  max_segs),
2329605ad7f1SEric Dumazet 						    nonagle);
2330605ad7f1SEric Dumazet 
2331605ad7f1SEric Dumazet 		if (skb->len > limit &&
233275c119afSEric Dumazet 		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
233375c119afSEric Dumazet 					  skb, limit, mss_now, gfp)))
2334605ad7f1SEric Dumazet 			break;
2335605ad7f1SEric Dumazet 
23367aa5470cSEric Dumazet 		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
23377aa5470cSEric Dumazet 			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
2338f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
233946d3ceabSEric Dumazet 			break;
2340c9eeec26SEric Dumazet 
2341d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
23421da177e4SLinus Torvalds 			break;
23431da177e4SLinus Torvalds 
2344ec342325SAndrew Vagin repair:
23451da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
23461da177e4SLinus Torvalds 		 * This call will increment packets_out.
23471da177e4SLinus Torvalds 		 */
234866f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
23491da177e4SLinus Torvalds 
23501da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2351a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2352d5dd9175SIlpo Järvinen 
2353d5dd9175SIlpo Järvinen 		if (push_one)
2354d5dd9175SIlpo Järvinen 			break;
23551da177e4SLinus Torvalds 	}
23561da177e4SLinus Torvalds 
23575615f886SFrancis Yan 	if (is_rwnd_limited)
23585615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
23595615f886SFrancis Yan 	else
23605615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
23615615f886SFrancis Yan 
2362aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2363684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2364684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
23656ba8a3b1SNandita Dukkipati 
23666ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
23676ba8a3b1SNandita Dukkipati 		if (push_one != 2)
23686ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
2369d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2370ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2371a2a385d6SEric Dumazet 		return false;
23721da177e4SLinus Torvalds 	}
237375c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
23746ba8a3b1SNandita Dukkipati }
23756ba8a3b1SNandita Dukkipati 
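/* Illustrative sketch (not built): the three ways tcp_write_xmit() is
 * invoked in this file, differing only in push_one.
 */
#if 0
	/* push_one == 0: normal path, drain as much as cwnd/rwnd allow */
	tcp_write_xmit(sk, cur_mss, nonagle, 0, sk_gfp_mask(sk, GFP_ATOMIC));
	/* push_one == 1: send exactly the skb at the head (tcp_push_one) */
	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
	/* push_one == 2: loss probe, may ignore cwnd to force one packet */
	tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
#endif
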
23766ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
23776ba8a3b1SNandita Dukkipati {
23786ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
23796ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2380a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
23816ba8a3b1SNandita Dukkipati 
23826ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
23836ba8a3b1SNandita Dukkipati 	 * finishes.
23846ba8a3b1SNandita Dukkipati 	 */
2385f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
23866ba8a3b1SNandita Dukkipati 		return false;
23876ba8a3b1SNandita Dukkipati 
23886ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
23896ba8a3b1SNandita Dukkipati 	 * in Open state, that are either limited by cwnd or application.
23906ba8a3b1SNandita Dukkipati 	 * in Open state that are either cwnd-limited or application-limited.
2391bec41a11SYuchung Cheng 	if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) ||
2392bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2393bec41a11SYuchung Cheng 	    icsk->icsk_ca_state != TCP_CA_Open)
23946ba8a3b1SNandita Dukkipati 		return false;
23956ba8a3b1SNandita Dukkipati 
23966ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
239775c119afSEric Dumazet 	     !tcp_write_queue_empty(sk))
23986ba8a3b1SNandita Dukkipati 		return false;
23996ba8a3b1SNandita Dukkipati 
2400bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2401f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2402f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24036ba8a3b1SNandita Dukkipati 	 */
2404bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2405bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
24066ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2407bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2408bb4d991aSYuchung Cheng 		else
2409bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2410bb4d991aSYuchung Cheng 	} else {
2411bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2412bb4d991aSYuchung Cheng 	}
24136ba8a3b1SNandita Dukkipati 
2414a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2415a2815817SNeal Cardwell 	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2416a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2417a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
24186ba8a3b1SNandita Dukkipati 
24196ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
24206ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
24216ba8a3b1SNandita Dukkipati 	return true;
24226ba8a3b1SNandita Dukkipati }
24236ba8a3b1SNandita Dukkipati 
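/* Illustrative sketch (not built): the PTO arithmetic above.  tp->srtt_us
 * holds 8 * smoothed RTT in usec, so (srtt_us >> 2) is 2 * srtt.  Input
 * values and the helper name are hypothetical.
 */
#if 0
static u32 tlp_timeout_sketch(u32 srtt_us, u32 packets_out, s32 rto_delta_us)
{
	u32 timeout;

	if (srtt_us)
		timeout = usecs_to_jiffies(srtt_us >> 2) +
			  (packets_out == 1 ? TCP_RTO_MIN : TCP_TIMEOUT_MIN);
	else
		timeout = TCP_TIMEOUT_INIT;	/* no RTT sample yet */

	if (rto_delta_us > 0)	/* never fire later than the RTO would */
		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
	return timeout;		/* jiffies */
}
#endif
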
24241f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
24251f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
24261f3279aeSEric Dumazet  * In this case, there is very little point doing a retransmit !
24271f3279aeSEric Dumazet  */
24281f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24291f3279aeSEric Dumazet 				    const struct sk_buff *skb)
24301f3279aeSEric Dumazet {
243139bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2432c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
24331f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24341f3279aeSEric Dumazet 		return true;
24351f3279aeSEric Dumazet 	}
24361f3279aeSEric Dumazet 	return false;
24371f3279aeSEric Dumazet }
24381f3279aeSEric Dumazet 
2439b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
24406ba8a3b1SNandita Dukkipati  * retransmit the last segment.
24416ba8a3b1SNandita Dukkipati  */
24426ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
24436ba8a3b1SNandita Dukkipati {
24449b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
24456ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
24466ba8a3b1SNandita Dukkipati 	int pcount;
24476ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
24486ba8a3b1SNandita Dukkipati 
2449b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
245075c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2451b340b264SYuchung Cheng 		pcount = tp->packets_out;
2452b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2453b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2454b340b264SYuchung Cheng 			goto probe_sent;
24556ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24566ba8a3b1SNandita Dukkipati 	}
245775c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
24586ba8a3b1SNandita Dukkipati 
24599b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
24609b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
24619b717a8dSNandita Dukkipati 		goto rearm_timer;
24629b717a8dSNandita Dukkipati 
24636ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
24646ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
24656ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24666ba8a3b1SNandita Dukkipati 
24671f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
24681f3279aeSEric Dumazet 		goto rearm_timer;
24691f3279aeSEric Dumazet 
24706ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
24716ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
24726ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24736ba8a3b1SNandita Dukkipati 
24746ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
247575c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
247675c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
24776cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
24786ba8a3b1SNandita Dukkipati 			goto rearm_timer;
247975c119afSEric Dumazet 		skb = skb_rb_next(skb);
24806ba8a3b1SNandita Dukkipati 	}
24816ba8a3b1SNandita Dukkipati 
24826ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
24836ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24846ba8a3b1SNandita Dukkipati 
248510d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2486b340b264SYuchung Cheng 		goto rearm_timer;
24876ba8a3b1SNandita Dukkipati 
24889b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
24899b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
24909b717a8dSNandita Dukkipati 
2491b340b264SYuchung Cheng probe_sent:
2492c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2493fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2494fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2495b340b264SYuchung Cheng rearm_timer:
2496fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
24971da177e4SLinus Torvalds }
24981da177e4SLinus Torvalds 
2499a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2500a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2501a762a980SDavid S. Miller  * The socket must be locked by the caller.
2502a762a980SDavid S. Miller  */
25039e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25049e412ba7SIlpo Järvinen 			       int nonagle)
2505a762a980SDavid S. Miller {
2506726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2507726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2508726e07a8SIlpo Järvinen 	 * all will be happy.
2509726e07a8SIlpo Järvinen 	 */
2510726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2511726e07a8SIlpo Järvinen 		return;
2512726e07a8SIlpo Järvinen 
251399a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
25147450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
25159e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2516a762a980SDavid S. Miller }
2517a762a980SDavid S. Miller 
2518c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2519c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2520c1b4a7e6SDavid S. Miller  */
2521c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2522c1b4a7e6SDavid S. Miller {
2523fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2524c1b4a7e6SDavid S. Miller 
2525c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2526c1b4a7e6SDavid S. Miller 
2527d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2528c1b4a7e6SDavid S. Miller }
2529c1b4a7e6SDavid S. Miller 
25301da177e4SLinus Torvalds /* This function returns the amount that we can raise the
25311da177e4SLinus Torvalds  * usable window based on the following constraints
25321da177e4SLinus Torvalds  *
25331da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
25341da177e4SLinus Torvalds  * 2. We limit memory per socket
25351da177e4SLinus Torvalds  *
25361da177e4SLinus Torvalds  * RFC 1122:
25371da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
25381da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
25391da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
25401da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
25411da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
25421da177e4SLinus Torvalds  * it at least MSS bytes.
25431da177e4SLinus Torvalds  *
25441da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
25451da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
25461da177e4SLinus Torvalds  *
25471da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
25481da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
25491da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
25501da177e4SLinus Torvalds  * window to always advance by a single byte.
25511da177e4SLinus Torvalds  *
25521da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
25531da177e4SLinus Torvalds  * then this will not be a problem.
25541da177e4SLinus Torvalds  *
25551da177e4SLinus Torvalds  * BSD seems to make the following compromise:
25561da177e4SLinus Torvalds  *
25571da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
25581da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
25591da177e4SLinus Torvalds  *	then set the window to 0.
25601da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
25611da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
25621da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
25631da177e4SLinus Torvalds  *
25641da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
25651da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
25661da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
25671da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
25681da177e4SLinus Torvalds  * because the pipeline is full.
25691da177e4SLinus Torvalds  *
25701da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
25711da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
25721da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
25731da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
25741da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
25751da177e4SLinus Torvalds  *
25761da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
25771da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
25781da177e4SLinus Torvalds  *
25791da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
25801da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
25811da177e4SLinus Torvalds  */
25821da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
25831da177e4SLinus Torvalds {
2584463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
25851da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2586caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
25871da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
25881da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
25891da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
25901da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
25911da177e4SLinus Torvalds 	 */
2592463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
25931da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
259486c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
259586c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
25961da177e4SLinus Torvalds 	int window;
25971da177e4SLinus Torvalds 
259806425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
25991da177e4SLinus Torvalds 		mss = full_space;
260006425c30SEric Dumazet 		if (mss <= 0)
260106425c30SEric Dumazet 			return 0;
260206425c30SEric Dumazet 	}
2603b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2604463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26051da177e4SLinus Torvalds 
2606b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2607056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2608056834d9SIlpo Järvinen 					       4U * tp->advmss);
26091da177e4SLinus Torvalds 
261086c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
261186c1a045SFlorian Westphal 		 * increase it due to wscale.
261286c1a045SFlorian Westphal 		 */
261386c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
261486c1a045SFlorian Westphal 
261586c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
261686c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
261786c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
261886c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
261986c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
262086c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
262186c1a045SFlorian Westphal 		 */
262286c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
26231da177e4SLinus Torvalds 			return 0;
26241da177e4SLinus Torvalds 	}
26251da177e4SLinus Torvalds 
26261da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
26271da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
26281da177e4SLinus Torvalds 
26291da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
26301da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
26311da177e4SLinus Torvalds 	 */
26321da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
26331da177e4SLinus Torvalds 		window = free_space;
26341da177e4SLinus Torvalds 
26351da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
26361da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
26371da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
26381da177e4SLinus Torvalds 		 */
26391935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
26401da177e4SLinus Torvalds 	} else {
26411935299dSGao Feng 		window = tp->rcv_wnd;
26421da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
26431da177e4SLinus Torvalds 		 * Window clamp already applied above.
26441da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
26451da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
26461da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
26471da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
26481da177e4SLinus Torvalds 		 * is too small.
26491da177e4SLinus Torvalds 		 */
26501da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
26511935299dSGao Feng 			window = rounddown(free_space, mss);
265284565070SJohn Heffner 		else if (mss == full_space &&
2653b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
265484565070SJohn Heffner 			window = free_space;
26551da177e4SLinus Torvalds 	}
26561da177e4SLinus Torvalds 
26571da177e4SLinus Torvalds 	return window;
26581da177e4SLinus Torvalds }
26591da177e4SLinus Torvalds 
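/* Illustrative sketch (not built): the two rounding strategies above.
 * With window scaling, the advertised value is rounded *up* so the
 * receiver-side shift cannot scale a small window away to zero; without
 * scaling, we round *down* to a multiple of the MSS.  Numbers in the
 * comments are hypothetical.
 */
#if 0
	/* rcv_wscale = 7, free_space = 100: ALIGN(100, 128) = 128,
	 * so (128 >> 7) = 1 survives scaling instead of (100 >> 7) = 0.
	 */
	window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));

	/* no scaling, mss = 1460, free_space = 5000:
	 * rounddown(5000, 1460) = 4380, i.e. exactly 3 segments.
	 */
	window = rounddown(free_space, mss);
#endif
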
2660cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2661082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2662082ac2d5SMartin KaFai Lau {
26630a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
26640a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
26650a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2666082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2667082ac2d5SMartin KaFai Lau 
26680a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2669082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
26702de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
26712de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2672082ac2d5SMartin KaFai Lau 	}
2673082ac2d5SMartin KaFai Lau }
2674082ac2d5SMartin KaFai Lau 
26754a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2676f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
26771da177e4SLinus Torvalds {
26781da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
267975c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
2680058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
26811da177e4SLinus Torvalds 
2682058dc334SIlpo Järvinen 	skb_size = skb->len;
2683058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
26841da177e4SLinus Torvalds 
2685058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
26861da177e4SLinus Torvalds 
2687f8071cdeSEric Dumazet 	if (next_skb_size) {
2688f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2689f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2690f8071cdeSEric Dumazet 				      next_skb_size);
2691f8071cdeSEric Dumazet 		else if (!skb_shift(skb, next_skb, next_skb_size))
2692f8071cdeSEric Dumazet 			return false;
2693f8071cdeSEric Dumazet 	}
26946859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2695a6963a6bSIlpo Järvinen 
269652d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
269752d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
26981da177e4SLinus Torvalds 
269984fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
27001da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
27011da177e4SLinus Torvalds 
27021da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27031da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27041da177e4SLinus Torvalds 
2705e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27064de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27071da177e4SLinus Torvalds 
27081da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27091da177e4SLinus Torvalds 	 * packet counting does not break.
27101da177e4SLinus Torvalds 	 */
27114828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2712a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2713b7689205SIlpo Järvinen 
2714b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2715ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2716ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2717ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2718b7689205SIlpo Järvinen 
2719797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2720797108d1SIlpo Järvinen 
2721082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2722082ac2d5SMartin KaFai Lau 
272375c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2724f8071cdeSEric Dumazet 	return true;
27251da177e4SLinus Torvalds }
27261da177e4SLinus Torvalds 
272767edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2728a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27294a17fc3aSIlpo Järvinen {
27304a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2731a2a385d6SEric Dumazet 		return false;
27324a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2733a2a385d6SEric Dumazet 		return false;
27342331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
27354a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2736a2a385d6SEric Dumazet 		return false;
27374a17fc3aSIlpo Järvinen 
2738a2a385d6SEric Dumazet 	return true;
27394a17fc3aSIlpo Järvinen }
27404a17fc3aSIlpo Järvinen 
274167edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
274267edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
274367edfef7SAndi Kleen  */
27444a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
27454a17fc3aSIlpo Järvinen 				     int space)
27464a17fc3aSIlpo Järvinen {
27474a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
27484a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2749a2a385d6SEric Dumazet 	bool first = true;
27504a17fc3aSIlpo Järvinen 
27514a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
27524a17fc3aSIlpo Järvinen 		return;
27534de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
27544a17fc3aSIlpo Järvinen 		return;
27554a17fc3aSIlpo Järvinen 
275675c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
27574a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
27584a17fc3aSIlpo Järvinen 			break;
27594a17fc3aSIlpo Järvinen 
2760a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2761a643b5d4SMartin KaFai Lau 			break;
2762a643b5d4SMartin KaFai Lau 
27634a17fc3aSIlpo Järvinen 		space -= skb->len;
27644a17fc3aSIlpo Järvinen 
27654a17fc3aSIlpo Järvinen 		if (first) {
2766a2a385d6SEric Dumazet 			first = false;
27674a17fc3aSIlpo Järvinen 			continue;
27684a17fc3aSIlpo Järvinen 		}
27694a17fc3aSIlpo Järvinen 
27704a17fc3aSIlpo Järvinen 		if (space < 0)
27714a17fc3aSIlpo Järvinen 			break;
27724a17fc3aSIlpo Järvinen 
27734a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
27744a17fc3aSIlpo Järvinen 			break;
27754a17fc3aSIlpo Järvinen 
2776f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2777f8071cdeSEric Dumazet 			break;
27784a17fc3aSIlpo Järvinen 	}
27794a17fc3aSIlpo Järvinen }
27804a17fc3aSIlpo Järvinen 
27811da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
27821da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
27831da177e4SLinus Torvalds  * error occurred which prevented the send.
27841da177e4SLinus Torvalds  */
278510d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
27861da177e4SLinus Torvalds {
27875d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
278810d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
27897d227cd2SSridhar Samudrala 	unsigned int cur_mss;
279010d3be56SEric Dumazet 	int diff, len, err;
27911da177e4SLinus Torvalds 
279310d3be56SEric Dumazet 	/* Inconclusive MTU probe */
279410d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
27955d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
27965d424d5aSJohn Heffner 
27971da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2798caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
27991da177e4SLinus Torvalds 	 */
280014afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2801ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2802ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28031da177e4SLinus Torvalds 		return -EAGAIN;
28041da177e4SLinus Torvalds 
28051f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28061f3279aeSEric Dumazet 		return -EBUSY;
28071f3279aeSEric Dumazet 
28081da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28091da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
28101da177e4SLinus Torvalds 			BUG();
28111da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28121da177e4SLinus Torvalds 			return -ENOMEM;
28131da177e4SLinus Torvalds 	}
28141da177e4SLinus Torvalds 
28157d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28167d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
28177d227cd2SSridhar Samudrala 
28180c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
28197d227cd2SSridhar Samudrala 
28201da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
28211da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
28221da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
28231da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
28241da177e4SLinus Torvalds 	 */
28259d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
28269d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
28271da177e4SLinus Torvalds 		return -EAGAIN;
28281da177e4SLinus Torvalds 
282910d3be56SEric Dumazet 	len = cur_mss * segs;
283010d3be56SEric Dumazet 	if (skb->len > len) {
283175c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
283275c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
28331da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
283402276f3cSIlpo Järvinen 	} else {
2835c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2836c52e2421SEric Dumazet 			return -ENOMEM;
283710d3be56SEric Dumazet 
283810d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
283910d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
284010d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
284110d3be56SEric Dumazet 		if (diff)
284210d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
284310d3be56SEric Dumazet 		if (skb->len < cur_mss)
284410d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
28451da177e4SLinus Torvalds 	}
28461da177e4SLinus Torvalds 
284749213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
284849213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
284949213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
285049213555SDaniel Borkmann 
2851678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2852678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2853678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2854678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2855678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2856678550c6SYuchung Cheng 	tp->total_retrans += segs;
2857678550c6SYuchung Cheng 
285850bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
285950bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
286050bceae9SThomas Graf 	 * beyond what csum_start can cover.
286150bceae9SThomas Graf 	 */
286250bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
286350bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
286410a81980SEric Dumazet 		struct sk_buff *nskb;
286510a81980SEric Dumazet 
2866e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
286710a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2868c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2869117632e6SEric Dumazet 				     -ENOBUFS;
2870e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2871e2080072SEric Dumazet 
28728c72c65bSEric Dumazet 		if (!err)
2873e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
2874117632e6SEric Dumazet 	} else {
2875c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2876117632e6SEric Dumazet 	}
2877c84a5711SYuchung Cheng 
2878fc9f3501SEric Dumazet 	if (likely(!err)) {
2879c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2880*e086101bSCong Wang 		trace_tcp_retransmit_skb(sk, skb);
2881678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2882678550c6SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2883fc9f3501SEric Dumazet 	}
2884c84a5711SYuchung Cheng 	return err;
288593b174adSYuchung Cheng }
288693b174adSYuchung Cheng 
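/* Illustrative sketch (not built): how the retransmit above sizes its
 * send.  The caller passes a segment budget; the skb is split if it
 * carries more than segs * cur_mss bytes.  Values are hypothetical.
 */
#if 0
	int segs = 3, cur_mss = 1460;		/* budget from cwnd       */
	int len = cur_mss * segs;		/* 4380 bytes may go out  */

	if (skb->len > len)			/* e.g. a 10-segment skb  */
		tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
			     cur_mss, GFP_ATOMIC);
#endif
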
288710d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
288893b174adSYuchung Cheng {
288993b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
289010d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
28911da177e4SLinus Torvalds 
28921da177e4SLinus Torvalds 	if (err == 0) {
28931da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
28941da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2895e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
28961da177e4SLinus Torvalds 		}
28971da177e4SLinus Torvalds #endif
28981da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
28991da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
29001da177e4SLinus Torvalds 
29011da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
29021da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
29037faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
29041da177e4SLinus Torvalds 
29051da177e4SLinus Torvalds 	}
29066e08d5e3SYuchung Cheng 
29076e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
29086e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
29096e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
29101da177e4SLinus Torvalds 	return err;
29111da177e4SLinus Torvalds }
29121da177e4SLinus Torvalds 
29131da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29141da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
29151da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
29161da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
29171da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
29181da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
29191da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
29201da177e4SLinus Torvalds  */
29211da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29221da177e4SLinus Torvalds {
29236687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
292475c119afSEric Dumazet 	struct sk_buff *skb, *rtx_head = NULL, *hole = NULL;
29251da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2926840a3cbeSYuchung Cheng 	u32 max_segs;
292761eb55f4SIlpo Järvinen 	int mib_idx;
29286a438bbeSStephen Hemminger 
292945e77d31SIlpo Järvinen 	if (!tp->packets_out)
293045e77d31SIlpo Järvinen 		return;
293145e77d31SIlpo Järvinen 
29326a438bbeSStephen Hemminger 	skb = tp->retransmit_skb_hint;
293375c119afSEric Dumazet 	if (!skb) {
293475c119afSEric Dumazet 		rtx_head = tcp_rtx_queue_head(sk);
293575c119afSEric Dumazet 		skb = rtx_head;
2936618d9f25SIlpo Järvinen 	}
2937ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
293875c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
2939dca0aaf8SEric Dumazet 		__u8 sacked;
294010d3be56SEric Dumazet 		int segs;
29411da177e4SLinus Torvalds 
2942218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2943218af599SEric Dumazet 			break;
2944218af599SEric Dumazet 
29456a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
294651456b29SIan Morris 		if (!hole)
29476a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
29486a438bbeSStephen Hemminger 
294910d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
295010d3be56SEric Dumazet 		if (segs <= 0)
29511da177e4SLinus Torvalds 			return;
2952dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
2953a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
2954a3d2e9f8SEric Dumazet 		 * we need to make sure we are not sending too big TSO packets
2955a3d2e9f8SEric Dumazet 		 */
2956a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
29570e1c54c2SIlpo Järvinen 
2958840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
2959006f582cSIlpo Järvinen 			break;
29600e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
296151456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
29620e1c54c2SIlpo Järvinen 				hole = skb;
296361eb55f4SIlpo Järvinen 			continue;
29641da177e4SLinus Torvalds 
29650e1c54c2SIlpo Järvinen 		} else {
29660e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
29670e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
29680e1c54c2SIlpo Järvinen 			else
29690e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
29700e1c54c2SIlpo Järvinen 		}
29710e1c54c2SIlpo Järvinen 
29720e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
297361eb55f4SIlpo Järvinen 			continue;
297440b215e5SPavel Emelyanov 
2975f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
2976f9616c35SEric Dumazet 			return;
2977f9616c35SEric Dumazet 
297810d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
29791da177e4SLinus Torvalds 			return;
298024ab6becSYuchung Cheng 
2981de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
29821da177e4SLinus Torvalds 
2983684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2984a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2985a262f0cdSNandita Dukkipati 
298675c119afSEric Dumazet 		if (skb == rtx_head &&
298757dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
2988463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
29893f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
29903f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
29911da177e4SLinus Torvalds 	}
29921da177e4SLinus Torvalds }
29931da177e4SLinus Torvalds 
2994d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
2995d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
2996845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
2997845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
2998a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
2999a6c5ea4cSEric Dumazet  * with edge trigger epoll()
3000d83769a5SEric Dumazet  */
3001a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3002d83769a5SEric Dumazet {
3003e805605cSJohannes Weiner 	int amt;
3004d83769a5SEric Dumazet 
3005d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3006d83769a5SEric Dumazet 		return;
3007d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3008d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3009e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3010e805605cSJohannes Weiner 
3011baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3012baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3013d83769a5SEric Dumazet }
3014d83769a5SEric Dumazet 
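/* Illustrative sketch (not built): the forced charge above rounds the
 * request up to whole accounting quanta (SK_MEM_QUANTUM, PAGE_SIZE at
 * this point in time), so a FIN skb can always be accounted even when
 * over the memory limit.  Numbers are hypothetical.
 */
#if 0
	/* size = 2304, PAGE_SIZE = 4096: sk_mem_pages() returns 1,
	 * so sk_forward_alloc grows by 4096 bytes.
	 */
	amt = sk_mem_pages(size);
	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
#endif
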
3015845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3016845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
30171da177e4SLinus Torvalds  */
30181da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30191da177e4SLinus Torvalds {
3020845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30221da177e4SLinus Torvalds 
3023845704a5SEric Dumazet 	/* Optimization, tack on the FIN if we have one skb in write queue and
3024845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
3025845704a5SEric Dumazet 	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3026845704a5SEric Dumazet 	 * as the TCP stack thinks it has already been transmitted.
30271da177e4SLinus Torvalds 	 */
302875c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
302975c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
303075c119afSEric Dumazet 
303175c119afSEric Dumazet 	if (tskb) {
3032845704a5SEric Dumazet coalesce:
3033845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3034845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
30351da177e4SLinus Torvalds 		tp->write_seq++;
303675c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3037845704a5SEric Dumazet 			/* This means tskb was already sent.
3038845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3039845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3040845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3041845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3042845704a5SEric Dumazet 			 */
3043845704a5SEric Dumazet 			tp->snd_nxt++;
3044845704a5SEric Dumazet 			return;
3045845704a5SEric Dumazet 		}
30461da177e4SLinus Torvalds 	} else {
3047845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3048845704a5SEric Dumazet 		if (unlikely(!skb)) {
3049845704a5SEric Dumazet 			if (tskb)
3050845704a5SEric Dumazet 				goto coalesce;
3051845704a5SEric Dumazet 			return;
30521da177e4SLinus Torvalds 		}
3053e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3054d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3055a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
30561da177e4SLinus Torvalds 		/* The FIN eats one unit of sequence space; write_seq is advanced by tcp_queue_skb(). */
3057e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3058a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
30591da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
30601da177e4SLinus Torvalds 	}
3061845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
30621da177e4SLinus Torvalds }
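/* Sequence accounting sketch for the coalesce path above (illustrative
 * numbers): if the tail skb covers bytes [1000, 1500) and the FIN is
 * tacked on, end_seq and write_seq both advance to 1501, since the FIN
 * flag consumes one unit of sequence space.  No new skb and no extra
 * memory are needed, which is why this path is preferred under memory
 * pressure.
 */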
30631da177e4SLinus Torvalds 
30641da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
30651da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
30661da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
306765bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
30681da177e4SLinus Torvalds  */
3069dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
30701da177e4SLinus Torvalds {
30711da177e4SLinus Torvalds 	struct sk_buff *skb;
30721da177e4SLinus Torvalds 
30737cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
30747cc2b043SGao Feng 
30751da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
30761da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
30771da177e4SLinus Torvalds 	if (!skb) {
30784e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
30791da177e4SLinus Torvalds 		return;
30801da177e4SLinus Torvalds 	}
30811da177e4SLinus Torvalds 
30821da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
30831da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3084e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3085a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
30869a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
30871da177e4SLinus Torvalds 	/* Send it off. */
3088dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
30894e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
30901da177e4SLinus Torvalds }
30911da177e4SLinus Torvalds 
309267edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
309367edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
30941da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
30951da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
30961da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
30971da177e4SLinus Torvalds  */
30981da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
30991da177e4SLinus Torvalds {
31001da177e4SLinus Torvalds 	struct sk_buff *skb;
31011da177e4SLinus Torvalds 
310275c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
310351456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
310475c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31051da177e4SLinus Torvalds 		return -EFAULT;
31061da177e4SLinus Torvalds 	}
31074de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31081da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3109e2080072SEric Dumazet 			struct sk_buff *nskb;
3110e2080072SEric Dumazet 
3111e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3112e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3113e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
311451456b29SIan Morris 			if (!nskb)
31151da177e4SLinus Torvalds 				return -ENOMEM;
3116e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
311775c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3118f4a775d1SEric Dumazet 			__skb_header_release(nskb);
311975c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
31203ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
31213ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
31221da177e4SLinus Torvalds 			skb = nskb;
31231da177e4SLinus Torvalds 		}
31241da177e4SLinus Torvalds 
31254de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3126735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
31271da177e4SLinus Torvalds 	}
3128dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
31291da177e4SLinus Torvalds }
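/* Note on the skb_cloned() branch above: a queued SYN-ACK may still be
 * referenced by a clone from an earlier transmit, so its header cannot
 * be edited in place.  skb_copy() makes a private copy (with the
 * tsorted anchor preserved around the call), which replaces the
 * original in the rtx queue before the ACK flag is set.
 */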
31301da177e4SLinus Torvalds 
31314aea39c1SEric Dumazet /**
31324aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
31334aea39c1SEric Dumazet  * @sk: listener socket
31344aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
31354aea39c1SEric Dumazet  * @req: request_sock pointer
31364aea39c1SEric Dumazet  *
31374aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
31384aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
31394aea39c1SEric Dumazet  */
31405d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3141e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3142ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3143b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
31441da177e4SLinus Torvalds {
31452e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
31465d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
314780f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
31485d062de7SEric Dumazet 	struct tcp_out_options opts;
31495d062de7SEric Dumazet 	struct sk_buff *skb;
3150bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
31515d062de7SEric Dumazet 	struct tcphdr *th;
3152f5fff5dcSTom Quetchenbach 	int mss;
31531da177e4SLinus Torvalds 
3154ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
31554aea39c1SEric Dumazet 	if (unlikely(!skb)) {
31564aea39c1SEric Dumazet 		dst_release(dst);
31571da177e4SLinus Torvalds 		return NULL;
31584aea39c1SEric Dumazet 	}
31591da177e4SLinus Torvalds 	/* Reserve space for headers. */
31601da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
31611da177e4SLinus Torvalds 
3162b3d05147SEric Dumazet 	switch (synack_type) {
3163b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
31649e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3165b3d05147SEric Dumazet 		break;
3166b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3167b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3168b3d05147SEric Dumazet 		 * to avoid false sharing.
3169b3d05147SEric Dumazet 		 */
3170b3d05147SEric Dumazet 		break;
3171b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3172ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3173ca6fb065SEric Dumazet 		 * multiple cpus might call us concurrently.
3174ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3175ca6fb065SEric Dumazet 		 */
3176ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3177b3d05147SEric Dumazet 		break;
3178ca6fb065SEric Dumazet 	}
31794aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
31801da177e4SLinus Torvalds 
31813541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3182f5fff5dcSTom Quetchenbach 
318333ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
31848b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
31858b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
31869a568de4SEric Dumazet 		skb->skb_mstamp = cookie_init_timestamp(req);
31878b5f12d0SFlorian Westphal 	else
31888b5f12d0SFlorian Westphal #endif
31899a568de4SEric Dumazet 		skb->skb_mstamp = tcp_clock_us();
319080f03e27SEric Dumazet 
319180f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
319280f03e27SEric Dumazet 	rcu_read_lock();
3193fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
319480f03e27SEric Dumazet #endif
319558d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
319637bfbddaSEric Dumazet 	tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
319737bfbddaSEric Dumazet 			  sizeof(*th);
319833ad798cSAdam Langley 
3199aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3200aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
32011da177e4SLinus Torvalds 
3202ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32031da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
32041da177e4SLinus Torvalds 	th->syn = 1;
32051da177e4SLinus Torvalds 	th->ack = 1;
32066ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3207b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3208634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3209e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
3210e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
3211e870a8efSIlpo Järvinen 	 * not even correctly set).
3212e870a8efSIlpo Järvinen 	 */
3213e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
3214a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
32154957faadSWilliam Allen Simpson 
32161da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
32178336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
32188336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32191da177e4SLinus Torvalds 
32201da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3221ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
32225d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
32231da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
322490bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3225cfb6eeb4SYOSHIFUJI Hideaki 
3226cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3227cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
322880f03e27SEric Dumazet 	if (md5)
3229bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
323039f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
323180f03e27SEric Dumazet 	rcu_read_unlock();
3232cfb6eeb4SYOSHIFUJI Hideaki #endif
3233cfb6eeb4SYOSHIFUJI Hideaki 
3234b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any), clean our debris */
32352456e855SThomas Gleixner 	skb->tstamp = 0;
32361da177e4SLinus Torvalds 	return skb;
32371da177e4SLinus Torvalds }
32384bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
32391da177e4SLinus Torvalds 
324081164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
324181164413SDaniel Borkmann {
324281164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
324381164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
324481164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
324581164413SDaniel Borkmann 
324681164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
324781164413SDaniel Borkmann 		return;
324881164413SDaniel Borkmann 
324981164413SDaniel Borkmann 	rcu_read_lock();
325081164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
325181164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
325281164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
325381164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
325481164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
325581164413SDaniel Borkmann 	}
325681164413SDaniel Borkmann 	rcu_read_unlock();
325781164413SDaniel Borkmann }
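/* The RTAX_CC_ALGO metric consulted above is typically installed from
 * userspace via iproute2, e.g. (illustrative command and algorithm):
 *
 *	ip route add 10.0.0.0/24 via 192.168.1.1 congctl dctcp
 *
 * With "congctl lock dctcp", tcp_ca_dst_locked() returns true and
 * icsk_ca_dst_locked is set, so a later setsockopt(TCP_CONGESTION)
 * cannot override the route-mandated algorithm.
 */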
325881164413SDaniel Borkmann 
325967edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3260f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
32611da177e4SLinus Torvalds {
3262cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
32631da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32641da177e4SLinus Torvalds 	__u8 rcv_wscale;
326513d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
32661da177e4SLinus Torvalds 
32671da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
32681da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
32691da177e4SLinus Torvalds 	 */
32705d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
32715d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
32725d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
32731da177e4SLinus Torvalds 
3274cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
327500db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3276cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3277cfb6eeb4SYOSHIFUJI Hideaki #endif
3278cfb6eeb4SYOSHIFUJI Hideaki 
32791da177e4SLinus Torvalds 	/* If the user gave us a TCP_MAXSEG, record it to clamp the MSS */
32801da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
32811da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
32821da177e4SLinus Torvalds 	tp->max_window = 0;
32835d424d5aSJohn Heffner 	tcp_mtup_init(sk);
32841da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
32851da177e4SLinus Torvalds 
328681164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
328781164413SDaniel Borkmann 
32881da177e4SLinus Torvalds 	if (!tp->window_clamp)
32891da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
32903541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3291f5fff5dcSTom Quetchenbach 
32921da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
32931da177e4SLinus Torvalds 
3294e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3295e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3296e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3297e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3298e88c64f0SHagen Paul Pfeifer 
329913d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
330013d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
330113d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
330213d3b1ebSLawrence Brakmo 
33031da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
33041da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33051da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33061da177e4SLinus Torvalds 				  &tp->window_clamp,
33079bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
330831d12926Slaurent chavey 				  &rcv_wscale,
330913d3b1ebSLawrence Brakmo 				  rcv_wnd);
33101da177e4SLinus Torvalds 
33111da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
33121da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
33131da177e4SLinus Torvalds 
33141da177e4SLinus Torvalds 	sk->sk_err = 0;
33151da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
33161da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3317ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
33181da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
33191da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
332033f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3321370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3322ee995283SPavel Emelyanov 
3323ee995283SPavel Emelyanov 	if (likely(!tp->repair))
33241da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3325c7781a6eSAndrew Vagin 	else
332670eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3327ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3328ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
33291da177e4SLinus Torvalds 
33308550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3331463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
33321da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
33331da177e4SLinus Torvalds }
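/* Header sizing sketch for the setup above (assuming both option blocks
 * are in use): the base TCP header is 20 bytes, timestamps add
 * TCPOLEN_TSTAMP_ALIGNED (12) and MD5 adds TCPOLEN_MD5SIG_ALIGNED (20),
 * for 52 bytes total -- still within the 60-byte ceiling imposed by the
 * 4-bit doff field (15 * 4).
 */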
33341da177e4SLinus Torvalds 
3335783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3336783237e8SYuchung Cheng {
3337783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3338783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3339783237e8SYuchung Cheng 
3340783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3341f4a775d1SEric Dumazet 	__skb_header_release(skb);
3342783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3343783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3344783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3345783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3346783237e8SYuchung Cheng }
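/* Accounting sketch for tcp_connect_queue_skb() (illustrative numbers):
 * queueing a 5-byte SYN-data payload whose cb holds seq = 100 and
 * end_seq = 101 (the SYN already ate one unit) yields end_seq = 106
 * and write_seq = 106, while packets_out grows by tcp_skb_pcount(skb),
 * i.e. 1 for a non-GSO skb.
 */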
3347783237e8SYuchung Cheng 
3348783237e8SYuchung Cheng /* Build and send a SYN with data and a (cached) Fast Open cookie. However,
3349783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3350783237e8SYuchung Cheng  * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
3351783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3352783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3353783237e8SYuchung Cheng  * a regular SYN with a Fast Open cookie request option.
3354783237e8SYuchung Cheng  */
3355783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3356783237e8SYuchung Cheng {
3357783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3358783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3359065263f4SWei Wang 	int space, err = 0;
3360355a901eSEric Dumazet 	struct sk_buff *syn_data;
3361783237e8SYuchung Cheng 
336267da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3363065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3364783237e8SYuchung Cheng 		goto fallback;
3365783237e8SYuchung Cheng 
3366783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3367783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3368783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3369783237e8SYuchung Cheng 	 */
33703541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
33713541f9e8SEric Dumazet 
33721b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3373783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3374783237e8SYuchung Cheng 
3375f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3376f5ddcbbbSEric Dumazet 
3377f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3378f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3379f5ddcbbbSEric Dumazet 
3380eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3381355a901eSEric Dumazet 	if (!syn_data)
3382783237e8SYuchung Cheng 		goto fallback;
3383355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3384355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
338507e100f9SEric Dumazet 	if (space) {
338607e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
338757be5bdaSAl Viro 					    &fo->data->msg_iter);
338857be5bdaSAl Viro 		if (unlikely(!copied)) {
3389355a901eSEric Dumazet 			kfree_skb(syn_data);
3390783237e8SYuchung Cheng 			goto fallback;
3391783237e8SYuchung Cheng 		}
339257be5bdaSAl Viro 		if (copied != space) {
339357be5bdaSAl Viro 			skb_trim(syn_data, copied);
339457be5bdaSAl Viro 			space = copied;
339557be5bdaSAl Viro 		}
339607e100f9SEric Dumazet 	}
3397355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3398355a901eSEric Dumazet 	if (space == fo->size)
3399355a901eSEric Dumazet 		fo->data = NULL;
3400355a901eSEric Dumazet 	fo->copied = space;
3401783237e8SYuchung Cheng 
3402355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34030f87230dSFrancis Yan 	if (syn_data->len)
34040f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3405355a901eSEric Dumazet 
3406355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3407355a901eSEric Dumazet 
3408355a901eSEric Dumazet 	syn->skb_mstamp = syn_data->skb_mstamp;
3409355a901eSEric Dumazet 
3410355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3411355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3412355a901eSEric Dumazet 	 * that we keep queued in case of a retransmit, as we
3413355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3414431a9124SEric Dumazet 	 */
3415355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3416355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3417355a901eSEric Dumazet 	if (!err) {
341867da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
341975c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3420f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3421783237e8SYuchung Cheng 		goto done;
3422783237e8SYuchung Cheng 	}
3423783237e8SYuchung Cheng 
342475c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
342575c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3426b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3427b5b7db8dSEric Dumazet 
3428783237e8SYuchung Cheng fallback:
3429783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3430783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3431783237e8SYuchung Cheng 		fo->cookie.len = 0;
3432783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3433783237e8SYuchung Cheng 	if (err)
3434783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3435783237e8SYuchung Cheng done:
3436783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3437783237e8SYuchung Cheng 	return err;
3438783237e8SYuchung Cheng }
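/* Sequence sketch for the SYN+data split above (illustrative ISN):
 * with ISN = 100 and 5 bytes of data, the transmitted SYN+DATA covers
 * [100, 106): seq 100 is the SYN flag, 101..105 are data.  After the
 * transmit, TCP_SKB_CB(syn_data)->seq is bumped to 101 and the SYN
 * flag is dropped, so the copy kept for retransmit is pure data; the
 * separate SYN-only skb still owns seq 100.
 */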
3439783237e8SYuchung Cheng 
344067edfef7SAndi Kleen /* Build a SYN and send it off. */
34411da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
34421da177e4SLinus Torvalds {
34431da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34441da177e4SLinus Torvalds 	struct sk_buff *buff;
3445ee586811SEric Paris 	int err;
34461da177e4SLinus Torvalds 
34479872a4bdSLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
34488ba60924SEric Dumazet 
34498ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
34508ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
34518ba60924SEric Dumazet 
34521da177e4SLinus Torvalds 	tcp_connect_init(sk);
34531da177e4SLinus Torvalds 
34542b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
34552b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
34562b916477SAndrey Vagin 		return 0;
34572b916477SAndrey Vagin 	}
34582b916477SAndrey Vagin 
3459eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3460355a901eSEric Dumazet 	if (unlikely(!buff))
34611da177e4SLinus Torvalds 		return -ENOBUFS;
34621da177e4SLinus Torvalds 
3463a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
34649a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
34659a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3466783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3467735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
346875c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
34691da177e4SLinus Torvalds 
3470783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3471783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3472783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3473ee586811SEric Paris 	if (err == -ECONNREFUSED)
3474ee586811SEric Paris 		return err;
3475bd37a088SWei Yongjun 
3476bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3477bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3478bd37a088SWei Yongjun 	 */
3479bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3480bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3481b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3482b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3483b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3484b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3485b5b7db8dSEric Dumazet 	}
348681cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
34871da177e4SLinus Torvalds 
34881da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
34893f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
34903f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
34911da177e4SLinus Torvalds 	return 0;
34921da177e4SLinus Torvalds }
34934bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
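/* For reference, tcp_connect() is reached from the connect(2) path via
 * tcp_v4_connect()/tcp_v6_connect().  A minimal userspace trigger
 * (hypothetical address, error handling elided):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sa = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */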
34941da177e4SLinus Torvalds 
34951da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
34961da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
34971da177e4SLinus Torvalds  * for details.
34981da177e4SLinus Torvalds  */
34991da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
35001da177e4SLinus Torvalds {
3501463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3502463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35031da177e4SLinus Torvalds 	unsigned long timeout;
35041da177e4SLinus Torvalds 
35059890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
35069890092eSFlorian Westphal 
35071da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3508463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
35091da177e4SLinus Torvalds 		int max_ato = HZ / 2;
35101da177e4SLinus Torvalds 
3511056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3512056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
35131da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
35141da177e4SLinus Torvalds 
35151da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
35161da177e4SLinus Torvalds 
35171da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3518463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
35191da177e4SLinus Torvalds 		 * directly.
35201da177e4SLinus Torvalds 		 */
3521740b0f18SEric Dumazet 		if (tp->srtt_us) {
3522740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3523740b0f18SEric Dumazet 					TCP_DELACK_MIN);
35241da177e4SLinus Torvalds 
35251da177e4SLinus Torvalds 			if (rtt < max_ato)
35261da177e4SLinus Torvalds 				max_ato = rtt;
35271da177e4SLinus Torvalds 		}
35281da177e4SLinus Torvalds 
35291da177e4SLinus Torvalds 		ato = min(ato, max_ato);
35301da177e4SLinus Torvalds 	}
35311da177e4SLinus Torvalds 
35321da177e4SLinus Torvalds 	/* Stay within the limit we were given */
35331da177e4SLinus Torvalds 	timeout = jiffies + ato;
35341da177e4SLinus Torvalds 
35351da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3536463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
35371da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
35381da177e4SLinus Torvalds 		 * send ACK now.
35391da177e4SLinus Torvalds 		 */
3540463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3541463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
35421da177e4SLinus Torvalds 			tcp_send_ack(sk);
35431da177e4SLinus Torvalds 			return;
35441da177e4SLinus Torvalds 		}
35451da177e4SLinus Torvalds 
3546463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3547463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
35481da177e4SLinus Torvalds 	}
3549463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3550463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3551463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
35521da177e4SLinus Torvalds }
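/* Timing sketch for the clamping above (assuming HZ == 1000, so one
 * jiffy is 1 ms): with tp->srtt_us = 40000 (a smoothed RTT of 5 ms,
 * since srtt_us stores srtt << 3), rtt = max(usecs_to_jiffies(5000),
 * TCP_DELACK_MIN) = max(5, 40) = 40 jiffies, capping max_ato at 40 ms
 * instead of the HZ/2 = 500 ms ceiling.
 */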
35531da177e4SLinus Torvalds 
35541da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
35551da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
35561da177e4SLinus Torvalds {
35571da177e4SLinus Torvalds 	struct sk_buff *buff;
35581da177e4SLinus Torvalds 
3559058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3560058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3561058dc334SIlpo Järvinen 		return;
3562058dc334SIlpo Järvinen 
35639890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
35649890092eSFlorian Westphal 
35651da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
35661da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
35671da177e4SLinus Torvalds 	 * sock.
35681da177e4SLinus Torvalds 	 */
35697450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
35707450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
35717450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3572463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3573463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
35743f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
35753f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
35761da177e4SLinus Torvalds 		return;
35771da177e4SLinus Torvalds 	}
35781da177e4SLinus Torvalds 
35791da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
35801da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3581a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
35821da177e4SLinus Torvalds 
358398781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
358498781965SEric Dumazet 	 * too much.
358598781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
358698781965SEric Dumazet 	 */
358798781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
358898781965SEric Dumazet 
35891da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
35907450aaf6SEric Dumazet 	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
35911da177e4SLinus Torvalds }
3592e3118e83SDaniel Borkmann EXPORT_SYMBOL_GPL(tcp_send_ack);
35931da177e4SLinus Torvalds 
35941da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
35951da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
35961da177e4SLinus Torvalds  *
35971da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
35981da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
35991da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
36001da177e4SLinus Torvalds  *
36011da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
36021da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
36031da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe the window.
36041da177e4SLinus Torvalds  */
3605e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
36061da177e4SLinus Torvalds {
36071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36081da177e4SLinus Torvalds 	struct sk_buff *skb;
36091da177e4SLinus Torvalds 
36101da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
36117450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
36127450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
361351456b29SIan Morris 	if (!skb)
36141da177e4SLinus Torvalds 		return -1;
36151da177e4SLinus Torvalds 
36161da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
36171da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36181da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
36191da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
36201da177e4SLinus Torvalds 	 * send it.
36211da177e4SLinus Torvalds 	 */
3622a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3623e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
36247450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
36251da177e4SLinus Torvalds }
36261da177e4SLinus Torvalds 
3627385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3628ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3629ee995283SPavel Emelyanov {
3630ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3631ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
36329a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3633e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3634ee995283SPavel Emelyanov 	}
3635ee995283SPavel Emelyanov }
3636ee995283SPavel Emelyanov 
363767edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3638e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
36391da177e4SLinus Torvalds {
36401da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36411da177e4SLinus Torvalds 	struct sk_buff *skb;
36421da177e4SLinus Torvalds 
3643058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3644058dc334SIlpo Järvinen 		return -1;
3645058dc334SIlpo Järvinen 
364600db4124SIan Morris 	skb = tcp_send_head(sk);
364700db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
36481da177e4SLinus Torvalds 		int err;
36490c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
365090840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
36511da177e4SLinus Torvalds 
36521da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
36531da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
36541da177e4SLinus Torvalds 
36551da177e4SLinus Torvalds 		/* We are probing the opening of a window
36561da177e4SLinus Torvalds 		 * but the window size is != 0;
36571da177e4SLinus Torvalds 		 * this must have been a result of SWS avoidance (sender).
36581da177e4SLinus Torvalds 		 */
36591da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
36601da177e4SLinus Torvalds 		    skb->len > mss) {
36611da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
36624de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
366375c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
366475c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
36651da177e4SLinus Torvalds 				return -1;
36661da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
36675bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
36681da177e4SLinus Torvalds 
36694de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3670dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
367166f5fe62SIlpo Järvinen 		if (!err)
367266f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
36731da177e4SLinus Torvalds 		return err;
36741da177e4SLinus Torvalds 	} else {
367533f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3676e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3677e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
36781da177e4SLinus Torvalds 	}
36791da177e4SLinus Torvalds }
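/* Probe sequencing sketch for the else branch above: with urgent data
 * pending (tp->snd_up within [snd_una + 1, snd_una + 0xFFFF]), two
 * probes go out.  tcp_xmit_probe_skb(sk, 1, mib) uses SEG.SEQ = SND.UNA
 * to carry the urgent pointer; tcp_xmit_probe_skb(sk, 0, mib) uses
 * SND.UNA - 1, an already-acked sequence that the peer must ack again,
 * thereby revealing its current window.
 */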
36801da177e4SLinus Torvalds 
36811da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
36821da177e4SLinus Torvalds  * send a partial packet, else a zero-window probe.
36831da177e4SLinus Torvalds  */
36841da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
36851da177e4SLinus Torvalds {
3686463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
36871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3688c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3689fcdd1cf4SEric Dumazet 	unsigned long probe_max;
36901da177e4SLinus Torvalds 	int err;
36911da177e4SLinus Torvalds 
3692e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
36931da177e4SLinus Torvalds 
369475c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
36951da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
36966687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3697463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
36981da177e4SLinus Torvalds 		return;
36991da177e4SLinus Torvalds 	}
37001da177e4SLinus Torvalds 
37011da177e4SLinus Torvalds 	if (err <= 0) {
3702c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3703463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
37046687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3705fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
37061da177e4SLinus Torvalds 	} else {
37071da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
37081da177e4SLinus Torvalds 		 * do not back off and do not remember icsk_probes_out.
37091da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
37101da177e4SLinus Torvalds 		 *
37111da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
37121da177e4SLinus Torvalds 		 */
37136687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
37146687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3715fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
37161da177e4SLinus Torvalds 	}
3717fcdd1cf4SEric Dumazet 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
371821c8fe99SEric Dumazet 				  tcp_probe0_when(sk, probe_max),
3719fcdd1cf4SEric Dumazet 				  TCP_RTO_MAX);
37201da177e4SLinus Torvalds }
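/* Backoff sketch for the timer arming above (illustrative values;
 * tcp_probe0_when() scales the probe base by 1 << icsk_backoff and
 * clamps the result at probe_max): with a 200 ms base, successive
 * unanswered probes fire after roughly 200 ms, 400 ms, 800 ms, ...
 * until the TCP_RTO_MAX (120 s) ceiling is reached.
 */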
37215db92c99SOctavian Purdila 
3722ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
37235db92c99SOctavian Purdila {
37245db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
37255db92c99SOctavian Purdila 	struct flowi fl;
37265db92c99SOctavian Purdila 	int res;
37275db92c99SOctavian Purdila 
372858d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3729b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
37305db92c99SOctavian Purdila 	if (!res) {
373190bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
373202a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
37337e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
37347e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
37355db92c99SOctavian Purdila 	}
37365db92c99SOctavian Purdila 	return res;
37375db92c99SOctavian Purdila }
37385db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3739