xref: /linux/net/ipv4/tcp_output.c (revision 75c119afe14f74b4dd967d75ed9f57ab6c0ef045)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */
361da177e4SLinus Torvalds 
3791df42beSJoe Perches #define pr_fmt(fmt) "TCP: " fmt
3891df42beSJoe Perches 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
425a0e3ad6STejun Heo #include <linux/gfp.h>
431da177e4SLinus Torvalds #include <linux/module.h>
441da177e4SLinus Torvalds 
/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of four TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 262144;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* Return SND.NXT if the window was not shrunk, or if the amount shrunk is
 * less than one window scaling factor (due to loss of precision).
 * If the window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND can also
 * already be invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

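/* Worked example (hypothetical numbers, not from this file): suppose
 * tcp_wnd_end(tp) = 1000, snd_nxt = 1100 and rcv_wscale = 7.  The peer
 * only learns our window in units of 1 << 7 = 128 bytes, so a 100-byte
 * shrink is below one scaling factor and snd_nxt is still acceptable;
 * with rcv_wscale = 4 (granularity 16), the same shrink would make us
 * fall back to tcp_wnd_end(tp) instead.
 */
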
/* Calculate the MSS to advertise in the SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window".  This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
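
/* Worked example (hypothetical numbers): assume snd_cwnd = 40, a restart
 * window of 10, and an idle delta of roughly 3 * RTO.  The loop halves
 * cwnd once per elapsed RTO but never below the restart window:
 * 40 -> 20 -> 10, so the connection resumes with snd_cwnd = 10.
 */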

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If this is a reply sent within ATO of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* The initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
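
/* Worked example (hypothetical numbers): with TCP_INIT_CWND = 10,
 * init_rwnd starts at 20.  For mss = 9000 (jumbo frames),
 * max((1460 * 20) / 9000, 2U) = max(3, 2) = 3 segments, so the initial
 * window stays near 20 * 1460 bytes despite the large MSS.
 */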

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize the space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window */
		space = max_t(u32, space, sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
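
/* Worked example (hypothetical numbers): for a 4 MB receive space the
 * scaling loop runs until space fits in 16 bits: 4194304 >> 7 = 32768,
 * so rcv_wscale = 7 (4194304 >> 6 = 65536 still exceeds U16_MAX = 65535).
 */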

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
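
/* Worked example (hypothetical numbers): with rcv_wscale = 7 and a current
 * window of 1000 bytes, a smaller new_win is rounded up via
 * ALIGN(1000, 128) = 1024, so within scaling granularity the peer never
 * sees the window shrink below what was already advertised.
 */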

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN-ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}
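
/* RFC 3168 recap (background, not from this file's comments): an
 * ECN-capable client sends its SYN with ECE|CWR set, the server answers
 * with only ECE in the SYN-ACK, data segments then carry ECT in the IP
 * header, and a receiver that observed CE keeps echoing ECE until it
 * sees a segment with CWR set, as implemented by the helpers above.
 */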

/* Construct the common control bits of a non-data skb. If a SYN/FIN is
 * present, auto increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody, as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}
}
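
/* Wire-layout example (illustrative): the timestamp-only word emitted
 * above is NOP(1) NOP(1) TIMESTAMP(8) LEN(10), i.e. the 32-bit value
 * 0x0101080a, followed by the two 32-bit tsval/tsecr words, giving the
 * usual 12 option bytes seen on established connections.
 */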

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets (should timestamps be used) must be included in
	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter; we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
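
/* Space-accounting example (illustrative): starting from
 * MAX_TCP_OPTION_SPACE = 40 bytes, a typical SYN consumes 4 (MSS) +
 * 12 (timestamps, whose word also carries SACK_PERM) + 4 (window scale)
 * = 20 bytes.  The 20 bytes left fit a maximum 16-byte Fast Open cookie:
 * 2 (base) + 16, rounded up to 20.
 */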

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

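/* Worked example: with timestamps in use, size = 12, so remaining = 28
 * and at most (28 - 4) / 8 = 3 SACK blocks fit (4 bytes of NOPs/base
 * plus 8 bytes per block), i.e. 12 + 4 + 3 * 8 = 40 = MAX_TCP_OPTION_SPACE.
 */
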
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp))
			tcp_xmit_retransmit_queue(sk);

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non-NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		if (!sk->sk_lock.owned &&
		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
			bh_lock_sock(sk);
			if (!sock_owned_by_user(sk)) {
				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
				tcp_tsq_handler(sk);
			}
			bh_unlock_sock(sk);
		}

		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED)
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
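
/* Illustration (hypothetical state): if TCPF_TSQ_DEFERRED and
 * TCPF_DELACK_TIMER_DEFERRED were both set while the socket was owned,
 * the cmpxchg loop above clears both bits in one atomic step, and the
 * saved 'flags' copy then runs each deferred handler exactly once.
 */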

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to the tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under hard irq.
 * We can not call the TCP stack right away.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;
	unsigned long nval, oval;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (oval & TSQF_QUEUED)
			break;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
			break;
		/* queue this socket to the tasklet queue */
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		break;
	}
	return HRTIMER_NORESTART;
}

/* BBR congestion control needs pacing.
 * The same remark applies to SO_MAX_PACING_RATE.
 * The sch_fq packet scheduler handles pacing efficiently,
 * but it is not always installed/used.
 * Return true if the TCP stack should pace packets itself.
 */
static bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
{
	u64 len_ns;
	u32 rate;

	if (!tcp_needs_internal_pacing(sk))
		return;
	rate = sk->sk_pacing_rate;
	if (!rate || rate == ~0U)
		return;

	/* Should account for header sizes as sch_fq does,
	 * but let's keep things simple.
	 */
	len_ns = (u64)skb->len * NSEC_PER_SEC;
	do_div(len_ns, rate);
	hrtimer_start(&tcp_sk(sk)->pacing_timer,
		      ktime_add_ns(ktime_get(), len_ns),
		      HRTIMER_MODE_ABS_PINNED);
}
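
/* Worked example (hypothetical numbers): at sk_pacing_rate = 125000000
 * bytes/sec (~1 Gbit/s), a 64260-byte TSO burst is spread over
 * 64260 * 1e9 / 125e6 ~= 514 us; that is when the pacing hrtimer fires
 * and tcp_pace_kick() re-arms transmission.
 */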

static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
{
	skb->skb_mstamp = tp->tcp_mstamp;
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
981e2080072SEric Dumazet 
9821da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
9831da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
9841da177e4SLinus Torvalds  * transmission and possible later retransmissions.
9851da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
9861da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
9871da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
9881da177e4SLinus Torvalds  * device.
9891da177e4SLinus Torvalds  *
9901da177e4SLinus Torvalds  * We are working here with either a clone of the original
9911da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
9921da177e4SLinus Torvalds  */
993056834d9SIlpo Järvinen static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
994056834d9SIlpo Järvinen 			    gfp_t gfp_mask)
9951da177e4SLinus Torvalds {
9966687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
997dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
998dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
999dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
100033ad798cSAdam Langley 	struct tcp_out_options opts;
100195c96174SEric Dumazet 	unsigned int tcp_options_size, tcp_header_size;
10028c72c65bSEric Dumazet 	struct sk_buff *oskb = NULL;
1003cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
10041da177e4SLinus Torvalds 	struct tcphdr *th;
10051da177e4SLinus Torvalds 	int err;
10061da177e4SLinus Torvalds 
1007dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
10086f094b9eSLawrence Brakmo 	tp = tcp_sk(sk);
1009dfb4b9dcSDavid S. Miller 
1010ccdbb6e9SEric Dumazet 	if (clone_it) {
10116f094b9eSLawrence Brakmo 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
10126f094b9eSLawrence Brakmo 			- tp->snd_una;
10138c72c65bSEric Dumazet 		oskb = skb;
1014e2080072SEric Dumazet 
1015e2080072SEric Dumazet 		tcp_skb_tsorted_save(oskb) {
1016e2080072SEric Dumazet 			if (unlikely(skb_cloned(oskb)))
1017e2080072SEric Dumazet 				skb = pskb_copy(oskb, gfp_mask);
1018dfb4b9dcSDavid S. Miller 			else
1019e2080072SEric Dumazet 				skb = skb_clone(oskb, gfp_mask);
1020e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(oskb);
1021e2080072SEric Dumazet 
1022dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
1023dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
1024dfb4b9dcSDavid S. Miller 	}
10258c72c65bSEric Dumazet 	skb->skb_mstamp = tp->tcp_mstamp;
1026dfb4b9dcSDavid S. Miller 
1027dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
1028dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
102933ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
10301da177e4SLinus Torvalds 
10314de075e0SEric Dumazet 	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
103233ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
103333ad798cSAdam Langley 	else
103433ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
103533ad798cSAdam Langley 							   &md5);
103633ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
10371da177e4SLinus Torvalds 
1038547669d4SEric Dumazet 	/* If no packet is in the qdisc/device queue, then allow XPS to select
1039b2532eb9SEric Dumazet 	 * another queue. We can be called from tcp_tsq_handler()
1040b2532eb9SEric Dumazet 	 * which holds one reference to sk_wmem_alloc.
1041b2532eb9SEric Dumazet 	 *
1042b2532eb9SEric Dumazet 	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1043b2532eb9SEric Dumazet 	 * One way to get this would be to set skb->truesize = 2 on them.
1044547669d4SEric Dumazet 	 */
1045b2532eb9SEric Dumazet 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
10461da177e4SLinus Torvalds 
104738ab52e8SEric Dumazet 	/* If we had to use memory reserve to allocate this skb,
104838ab52e8SEric Dumazet 	 * this might cause drops if packet is looped back :
104938ab52e8SEric Dumazet 	 * Other socket might not have SOCK_MEMALLOC.
105038ab52e8SEric Dumazet 	 * Packets not looped back do not care about pfmemalloc.
105138ab52e8SEric Dumazet 	 */
105238ab52e8SEric Dumazet 	skb->pfmemalloc = 0;
105338ab52e8SEric Dumazet 
1054aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
1055aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
105646d3ceabSEric Dumazet 
105746d3ceabSEric Dumazet 	skb_orphan(skb);
105846d3ceabSEric Dumazet 	skb->sk = sk;
10591d2077acSEric Dumazet 	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1060b73c3d0eSTom Herbert 	skb_set_hash_from_sk(skb, sk);
106114afee4bSReshetova, Elena 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
10621da177e4SLinus Torvalds 
1063c3a2e837SJulian Anastasov 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1064c3a2e837SJulian Anastasov 
10651da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
1066ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
1067c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
1068c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
10691da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
10701da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
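	/* The next statement stores the 4-bit data offset and the flag bits
	 * with one 16-bit write at byte offset 12 of the header.  Illustrative
	 * example (not in the original source): tcp_header_size = 32 gives
	 * doff = 32 >> 2 = 8, placed in the top nibble as (8 << 12) and
	 * OR-ed with the low 8 flag bits from tcb->tcp_flags.
	 */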
1071df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
10724de075e0SEric Dumazet 					tcb->tcp_flags);
1073dfb4b9dcSDavid S. Miller 
10741da177e4SLinus Torvalds 	th->check		= 0;
10751da177e4SLinus Torvalds 	th->urg_ptr		= 0;
10761da177e4SLinus Torvalds 
107733f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below-snd_una window probe */
10787691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
10797691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
10801da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
10811da177e4SLinus Torvalds 			th->urg = 1;
10827691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
10830eae88f3SEric Dumazet 			th->urg_ptr = htons(0xFFFF);
10847691367dSHerbert Xu 			th->urg = 1;
10857691367dSHerbert Xu 		}
10861da177e4SLinus Torvalds 	}
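	/* Illustrative case (not in the original source): with
	 * tcb->seq = 1000 and tp->snd_up = 1500, the offset fits and
	 * th->urg_ptr = htons(500); if snd_up lies 64 KB or more ahead of
	 * seq, the 16-bit field cannot express it and 0xFFFF is advertised
	 * instead.
	 */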
10871da177e4SLinus Torvalds 
1088bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
108951466a75SEric Dumazet 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1090ea1627c2SEric Dumazet 	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1091ea1627c2SEric Dumazet 		th->window      = htons(tcp_select_window(sk));
1092ea1627c2SEric Dumazet 		tcp_ecn_send(sk, skb, th, tcp_header_size);
1093ea1627c2SEric Dumazet 	} else {
1094ea1627c2SEric Dumazet 		/* RFC1323: The window in SYN & SYN/ACK segments
1095ea1627c2SEric Dumazet 		 * is never scaled.
1096ea1627c2SEric Dumazet 		 */
1097ea1627c2SEric Dumazet 		th->window	= htons(min(tp->rcv_wnd, 65535U));
1098ea1627c2SEric Dumazet 	}
1099cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1100cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
1101cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
1102a465419bSEric Dumazet 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1103bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
110439f8e58eSEric Dumazet 					       md5, sk, skb);
1105cfb6eeb4SYOSHIFUJI Hideaki 	}
1106cfb6eeb4SYOSHIFUJI Hideaki #endif
1107cfb6eeb4SYOSHIFUJI Hideaki 
1108bb296246SHerbert Xu 	icsk->icsk_af_ops->send_check(sk, skb);
11091da177e4SLinus Torvalds 
11104de075e0SEric Dumazet 	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1111fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
11121da177e4SLinus Torvalds 
1113a44d6eacSMartin KaFai Lau 	if (skb->len != tcp_header_size) {
1114cf533ea5SEric Dumazet 		tcp_event_data_sent(tp, sk);
1115a44d6eacSMartin KaFai Lau 		tp->data_segs_out += tcp_skb_pcount(skb);
1116218af599SEric Dumazet 		tcp_internal_pacing(sk, skb);
1117a44d6eacSMartin KaFai Lau 	}
11181da177e4SLinus Torvalds 
1119bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1120aa2ea058STom Herbert 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1121aa2ea058STom Herbert 			      tcp_skb_pcount(skb));
11221da177e4SLinus Torvalds 
11232efd055cSMarcelo Ricardo Leitner 	tp->segs_out += tcp_skb_pcount(skb);
1124f69ad292SEric Dumazet 	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1125cd7d8498SEric Dumazet 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1126f69ad292SEric Dumazet 	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1127cd7d8498SEric Dumazet 
11287faee5c0SEric Dumazet 	/* Our usage of tstamp should remain private */
11292456e855SThomas Gleixner 	skb->tstamp = 0;
1130971f10ecSEric Dumazet 
1131971f10ecSEric Dumazet 	/* Cleanup our debris for IP stacks */
1132971f10ecSEric Dumazet 	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1133971f10ecSEric Dumazet 			       sizeof(struct inet6_skb_parm)));
1134971f10ecSEric Dumazet 
1135b0270e91SEric Dumazet 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
11367faee5c0SEric Dumazet 
11378c72c65bSEric Dumazet 	if (unlikely(err > 0)) {
11385ee2c941SChristoph Paasch 		tcp_enter_cwr(sk);
11398c72c65bSEric Dumazet 		err = net_xmit_eval(err);
11408c72c65bSEric Dumazet 	}
1141fc225799SEric Dumazet 	if (!err && oskb) {
1142e2080072SEric Dumazet 		tcp_update_skb_after_send(tp, oskb);
1143fc225799SEric Dumazet 		tcp_rate_skb_sent(sk, oskb);
1144fc225799SEric Dumazet 	}
11458c72c65bSEric Dumazet 	return err;
11461da177e4SLinus Torvalds }
11471da177e4SLinus Torvalds 
114867edfef7SAndi Kleen /* This routine just queues the buffer for sending.
11491da177e4SLinus Torvalds  *
11501da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
11511da177e4SLinus Torvalds  * otherwise socket can stall.
11521da177e4SLinus Torvalds  */
11531da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
11541da177e4SLinus Torvalds {
11551da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
11581da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1159f4a775d1SEric Dumazet 	__skb_header_release(skb);
1160fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
11613ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
11623ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
11631da177e4SLinus Torvalds }
11641da177e4SLinus Torvalds 
116567edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
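/* Illustrative sizing (not in the original source): skb->len = 4000 with
 * mss_now = 1448 yields tcp_skb_pcount = DIV_ROUND_UP(4000, 1448) = 3 and
 * tcp_gso_size = 1448, while a 1000 byte skb takes the cheap non-TSO path
 * with pcount = 1 and gso_size = 0.
 */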
11665bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1167f6302d1dSDavid S. Miller {
11688f26fb1cSEric Dumazet 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1169f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1170f6302d1dSDavid S. Miller 		 * non-TSO case.
1171f6302d1dSDavid S. Miller 		 */
1172cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 1);
1173f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1174f6302d1dSDavid S. Miller 	} else {
1175cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1176f69ad292SEric Dumazet 		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
11771da177e4SLinus Torvalds 	}
11781da177e4SLinus Torvalds }
11791da177e4SLinus Torvalds 
118091fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
118168f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
118291fed7a1SIlpo Järvinen  */
1183cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
118491fed7a1SIlpo Järvinen 				   int decr)
118591fed7a1SIlpo Järvinen {
1186a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1187a47e5a98SIlpo Järvinen 
1188dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
118991fed7a1SIlpo Järvinen 		return;
119091fed7a1SIlpo Järvinen 
11916859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
119291fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
119391fed7a1SIlpo Järvinen }
119491fed7a1SIlpo Järvinen 
1195797108d1SIlpo Järvinen /* When the pcount of an skb in the middle of the write queue changes,
1196797108d1SIlpo Järvinen  * we need to make various tweaks to fix up the counters.
1197797108d1SIlpo Järvinen  */
1198cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1199797108d1SIlpo Järvinen {
1200797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1201797108d1SIlpo Järvinen 
1202797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1203797108d1SIlpo Järvinen 
1204797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1205797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1206797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1207797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1208797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1209797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1210797108d1SIlpo Järvinen 
1211797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1212797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1213797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1214797108d1SIlpo Järvinen 
1215797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1216797108d1SIlpo Järvinen 
1217797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1218797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
121952cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1220797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1221797108d1SIlpo Järvinen 
1222797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1223797108d1SIlpo Järvinen }
1224797108d1SIlpo Järvinen 
12250a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
12260a2cf20cSSoheil Hassas Yeganeh {
12270a2cf20cSSoheil Hassas Yeganeh 	return TCP_SKB_CB(skb)->txstamp_ack ||
12280a2cf20cSSoheil Hassas Yeganeh 		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
12290a2cf20cSSoheil Hassas Yeganeh }
12300a2cf20cSSoheil Hassas Yeganeh 
1231490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1232490cc7d0SWillem de Bruijn {
1233490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1234490cc7d0SWillem de Bruijn 
12350a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1236490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1237490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1238490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1239490cc7d0SWillem de Bruijn 
1240490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1241490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1242490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1243b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1244b51e13faSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack = 0;
1245490cc7d0SWillem de Bruijn 	}
1246490cc7d0SWillem de Bruijn }
1247490cc7d0SWillem de Bruijn 
1248a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1249a166140eSMartin KaFai Lau {
1250a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1251a166140eSMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = 0;
1252a166140eSMartin KaFai Lau }
1253a166140eSMartin KaFai Lau 
1254*75c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk.  */
1255*75c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb,
1256*75c119afSEric Dumazet 					 struct sk_buff *buff,
1257*75c119afSEric Dumazet 					 struct sock *sk,
1258*75c119afSEric Dumazet 					 enum tcp_queue tcp_queue)
1259*75c119afSEric Dumazet {
1260*75c119afSEric Dumazet 	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1261*75c119afSEric Dumazet 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
1262*75c119afSEric Dumazet 	else
1263*75c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1264*75c119afSEric Dumazet }
1265*75c119afSEric Dumazet 
12661da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
12671da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
12681da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
12691da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
12701da177e4SLinus Torvalds  */
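/* Illustrative split (not in the original source): fragmenting an skb that
 * covers [1000, 4000) at len = 1400 leaves skb as [1000, 2400) and creates
 * buff as [2400, 4000); PSH/FIN, if set, migrate to buff, the second packet.
 */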
1271*75c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1272*75c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
12736cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
12741da177e4SLinus Torvalds {
12751da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12761da177e4SLinus Torvalds 	struct sk_buff *buff;
12776475be16SDavid S. Miller 	int nsize, old_factor;
1278b60b49eaSHerbert Xu 	int nlen;
12799ce01461SIlpo Järvinen 	u8 flags;
12801da177e4SLinus Torvalds 
12812fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
12822fceec13SIlpo Järvinen 		return -EINVAL;
12836a438bbeSStephen Hemminger 
12841da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
12851da177e4SLinus Torvalds 	if (nsize < 0)
12861da177e4SLinus Torvalds 		nsize = 0;
12871da177e4SLinus Torvalds 
12886cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
12891da177e4SLinus Torvalds 		return -ENOMEM;
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
1292eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
129351456b29SIan Morris 	if (!buff)
12941da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1295ef5cb973SHerbert Xu 
12963ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
12973ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1298b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1299b60b49eaSHerbert Xu 	buff->truesize += nlen;
1300b60b49eaSHerbert Xu 	skb->truesize -= nlen;
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
13031da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
13041da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
13051da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
13061da177e4SLinus Torvalds 
13071da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
13084de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
13094de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
13104de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1311e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1312a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
13131da177e4SLinus Torvalds 
131484fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
13151da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1316056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1317056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
13181da177e4SLinus Torvalds 						       nsize, 0);
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 		skb_trim(skb, len);
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
13231da177e4SLinus Torvalds 	} else {
132484fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
13251da177e4SLinus Torvalds 		skb_split(skb, buff, len);
13261da177e4SLinus Torvalds 	}
13271da177e4SLinus Torvalds 
13281da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
13291da177e4SLinus Torvalds 
1330a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1331490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
13321da177e4SLinus Torvalds 
13336475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
13346475be16SDavid S. Miller 
13351da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
13365bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
13375bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
13381da177e4SLinus Torvalds 
1339b9f64820SYuchung Cheng 	/* Update delivered info for the new segment */
1340b9f64820SYuchung Cheng 	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1341b9f64820SYuchung Cheng 
13426475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
13436475be16SDavid S. Miller 	 * adjust the various packet counters.
13446475be16SDavid S. Miller 	 */
1345cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
13466475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
13476475be16SDavid S. Miller 			tcp_skb_pcount(buff);
13481da177e4SLinus Torvalds 
1349797108d1SIlpo Järvinen 		if (diff)
1350797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
13511da177e4SLinus Torvalds 	}
13521da177e4SLinus Torvalds 
13531da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1354f4a775d1SEric Dumazet 	__skb_header_release(buff);
1355*75c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1356e2080072SEric Dumazet 	list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
13571da177e4SLinus Torvalds 
13581da177e4SLinus Torvalds 	return 0;
13591da177e4SLinus Torvalds }
13601da177e4SLinus Torvalds 
1361f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled
1362f4d01666SEric Dumazet  * data is not copied, but immediately discarded.
13631da177e4SLinus Torvalds  */
13647162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len)
13651da177e4SLinus Torvalds {
13667b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
13671da177e4SLinus Torvalds 	int i, k, eat;
13681da177e4SLinus Torvalds 
13694fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
13704fa48bf3SEric Dumazet 	if (eat) {
13714fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
13724fa48bf3SEric Dumazet 		len -= eat;
13734fa48bf3SEric Dumazet 		if (!len)
13747162fb24SEric Dumazet 			return 0;
13754fa48bf3SEric Dumazet 	}
13761da177e4SLinus Torvalds 	eat = len;
13771da177e4SLinus Torvalds 	k = 0;
13787b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
13797b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
13807b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
13819e903e08SEric Dumazet 
13829e903e08SEric Dumazet 		if (size <= eat) {
1383aff65da0SIan Campbell 			skb_frag_unref(skb, i);
13849e903e08SEric Dumazet 			eat -= size;
13851da177e4SLinus Torvalds 		} else {
13867b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
13871da177e4SLinus Torvalds 			if (eat) {
13887b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
13897b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
13901da177e4SLinus Torvalds 				eat = 0;
13911da177e4SLinus Torvalds 			}
13921da177e4SLinus Torvalds 			k++;
13931da177e4SLinus Torvalds 		}
13941da177e4SLinus Torvalds 	}
13957b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds 	skb->data_len -= len;
13981da177e4SLinus Torvalds 	skb->len = skb->data_len;
13997162fb24SEric Dumazet 	return len;
14001da177e4SLinus Torvalds }
14011da177e4SLinus Torvalds 
140267edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
14031da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
14041da177e4SLinus Torvalds {
14057162fb24SEric Dumazet 	u32 delta_truesize;
14067162fb24SEric Dumazet 
140714bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
14081da177e4SLinus Torvalds 		return -ENOMEM;
14091da177e4SLinus Torvalds 
14107162fb24SEric Dumazet 	delta_truesize = __pskb_trim_head(skb, len);
14111da177e4SLinus Torvalds 
14121da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
141384fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
14141da177e4SLinus Torvalds 
14157162fb24SEric Dumazet 	if (delta_truesize) {
14167162fb24SEric Dumazet 		skb->truesize	   -= delta_truesize;
14177162fb24SEric Dumazet 		sk->sk_wmem_queued -= delta_truesize;
14187162fb24SEric Dumazet 		sk_mem_uncharge(sk, delta_truesize);
14191da177e4SLinus Torvalds 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
14207162fb24SEric Dumazet 	}
14211da177e4SLinus Torvalds 
14225b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
14231da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
14245bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 	return 0;
14271da177e4SLinus Torvalds }
14281da177e4SLinus Torvalds 
14291b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
14301b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
14315d424d5aSJohn Heffner {
1432cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1433cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14345d424d5aSJohn Heffner 	int mss_now;
14355d424d5aSJohn Heffner 
14365d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
14375d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) from RFC 1122.
14385d424d5aSJohn Heffner 	 */
14395d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
14405d424d5aSJohn Heffner 
144167469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
144267469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
144367469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
144467469601SEric Dumazet 
144567469601SEric Dumazet 		if (dst && dst_allfrag(dst))
144667469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
144767469601SEric Dumazet 	}
144867469601SEric Dumazet 
14495d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
14505d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
14515d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
14525d424d5aSJohn Heffner 
14535d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
14545d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
14555d424d5aSJohn Heffner 
14565d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
14575d424d5aSJohn Heffner 	if (mss_now < 48)
14585d424d5aSJohn Heffner 		mss_now = 48;
14595d424d5aSJohn Heffner 	return mss_now;
14605d424d5aSJohn Heffner }
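/* Illustrative arithmetic for __tcp_mtu_to_mss() (not in the original
 * source): IPv4 without extension headers and pmtu = 1500 gives
 * mss_now = 1500 - 20 - 20 = 1460, before the mss_clamp and
 * icsk_ext_hdr_len adjustments and the floor of 48.
 */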
14615d424d5aSJohn Heffner 
14621b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
14631b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14641b63edd6SYuchung Cheng {
14651b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
14661b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
14671b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14681b63edd6SYuchung Cheng }
14691b63edd6SYuchung Cheng 
14705d424d5aSJohn Heffner /* Inverse of above */
147167469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14725d424d5aSJohn Heffner {
1473cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1474cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
14755d424d5aSJohn Heffner 	int mtu;
14765d424d5aSJohn Heffner 
14775d424d5aSJohn Heffner 	mtu = mss +
14785d424d5aSJohn Heffner 	      tp->tcp_header_len +
14795d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
14805d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
14815d424d5aSJohn Heffner 
148267469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
148367469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
148467469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
148567469601SEric Dumazet 
148667469601SEric Dumazet 		if (dst && dst_allfrag(dst))
148767469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
148867469601SEric Dumazet 	}
14895d424d5aSJohn Heffner 	return mtu;
14905d424d5aSJohn Heffner }
1491556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
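/* Illustrative round trip (not in the original source): assuming
 * tp->tcp_header_len = 20 (no TCP options) and no extension headers,
 * tcp_mss_to_mtu(sk, 1460) returns 1460 + 20 + 20 = 1500 over IPv4; with
 * timestamps enabled tcp_header_len is 32 and the result grows to 1512.
 */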
14925d424d5aSJohn Heffner 
149367edfef7SAndi Kleen /* MTU probing init per socket */
14945d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14955d424d5aSJohn Heffner {
14965d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
14975d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
1498b0f9ca53SFan Du 	struct net *net = sock_net(sk);
14995d424d5aSJohn Heffner 
1500b0f9ca53SFan Du 	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15015d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15025d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
1503b0f9ca53SFan Du 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15045d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
150505cbc0dbSFan Du 	if (icsk->icsk_mtup.enabled)
1506c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15075d424d5aSJohn Heffner }
15084bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15095d424d5aSJohn Heffner 
15101da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
15131da177e4SLinus Torvalds    account for TCP options; it covers only the bare TCP header.
15141da177e4SLinus Torvalds 
15151da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1516caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with the SYN.
15171da177e4SLinus Torvalds    It also does not include TCP options.
15181da177e4SLinus Torvalds 
1519d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15201da177e4SLinus Torvalds 
15211da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
15221da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
15231da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
15241da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
15271da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
15281da177e4SLinus Torvalds 
1529d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1530d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
15311da177e4SLinus Torvalds  */
15321da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
15331da177e4SLinus Torvalds {
15341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1535d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
15365d424d5aSJohn Heffner 	int mss_now;
15371da177e4SLinus Torvalds 
15385d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
15395d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
15401da177e4SLinus Torvalds 
15415d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1542409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds 	/* And store cached results */
1545d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
15465d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
15475d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1548c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
15491da177e4SLinus Torvalds 
15501da177e4SLinus Torvalds 	return mss_now;
15511da177e4SLinus Torvalds }
15524bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
15551da177e4SLinus Torvalds  * and even PMTU discovery events into account.
15561da177e4SLinus Torvalds  */
15570c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
15581da177e4SLinus Torvalds {
1559cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1560cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1561c1b4a7e6SDavid S. Miller 	u32 mss_now;
156295c96174SEric Dumazet 	unsigned int header_len;
156333ad798cSAdam Langley 	struct tcp_out_options opts;
156433ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
15651da177e4SLinus Torvalds 
1566c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1567c1b4a7e6SDavid S. Miller 
15681da177e4SLinus Torvalds 	if (dst) {
15691da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1570d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
15711da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
15721da177e4SLinus Torvalds 	}
15731da177e4SLinus Torvalds 
157433ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
157533ad798cSAdam Langley 		     sizeof(struct tcphdr);
157633ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
157733ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
157833ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
157933ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
158033ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
158133ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
158233ad798cSAdam Langley 		mss_now -= delta;
158333ad798cSAdam Langley 	}
1584cfb6eeb4SYOSHIFUJI Hideaki 
15851da177e4SLinus Torvalds 	return mss_now;
15861da177e4SLinus Torvalds }
15871da177e4SLinus Torvalds 
158886fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
158986fd14adSWeiping Pan  * As additional protection, we do not touch cwnd in retransmission phases,
159086fd14adSWeiping Pan  * nor if the application hit its sndbuf limit recently.
159186fd14adSWeiping Pan  */
159286fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1593a762a980SDavid S. Miller {
15949e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1595a762a980SDavid S. Miller 
159686fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
159786fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
159886fd14adSWeiping Pan 		/* Limited by application or receiver window. */
159986fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
160086fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
160186fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
160286fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
160386fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
160486fd14adSWeiping Pan 		}
160586fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
160686fd14adSWeiping Pan 	}
1607c2203cf7SEric Dumazet 	tp->snd_cwnd_stamp = tcp_jiffies32;
160886fd14adSWeiping Pan }
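/* Illustrative decay (not in the original source): if snd_cwnd = 100 but
 * at most win_used = 20 packets were ever outstanding during the window,
 * cwnd shrinks to (100 + 20) / 2 = 60, after ssthresh is saved.
 */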
160986fd14adSWeiping Pan 
1610ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1611a762a980SDavid S. Miller {
16121b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1613a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1614a762a980SDavid S. Miller 
1615ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1616ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1617ca8a2263SNeal Cardwell 	 */
1618ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1619ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1620ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1621ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1622ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1623ca8a2263SNeal Cardwell 	}
1624e114a710SEric Dumazet 
162524901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1626a762a980SDavid S. Miller 		/* Network is fed fully. */
1627a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1628c2203cf7SEric Dumazet 		tp->snd_cwnd_stamp = tcp_jiffies32;
1629a762a980SDavid S. Miller 	} else {
1630a762a980SDavid S. Miller 		/* Network starves. */
1631a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1632a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1633a762a980SDavid S. Miller 
163415d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
1635c2203cf7SEric Dumazet 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16361b1fc3fdSWei Wang 		    !ca_ops->cong_control)
1637a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1638b0f71bd3SFrancis Yan 
1639b0f71bd3SFrancis Yan 		/* The following conditions together indicate the starvation
1640b0f71bd3SFrancis Yan 		 * is caused by insufficient sender buffer:
1641b0f71bd3SFrancis Yan 		 * 1) just sent some data (see tcp_write_xmit)
1642b0f71bd3SFrancis Yan 		 * 2) not cwnd limited (this else condition)
1643*75c119afSEric Dumazet 		 * 3) no more data to send (tcp_write_queue_empty())
1644b0f71bd3SFrancis Yan 		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1645b0f71bd3SFrancis Yan 		 */
1646*75c119afSEric Dumazet 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1647b0f71bd3SFrancis Yan 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1648b0f71bd3SFrancis Yan 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1649b0f71bd3SFrancis Yan 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1650a762a980SDavid S. Miller 	}
1651a762a980SDavid S. Miller }
1652a762a980SDavid S. Miller 
1653d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1654d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1655d4589926SEric Dumazet {
1656d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1657d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1658d4589926SEric Dumazet }
1659d4589926SEric Dumazet 
1660d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1661d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1662d4589926SEric Dumazet  * The test is really :
1663d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1664d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1665d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1666d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
16670e3a4803SIlpo Järvinen  */
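/* Illustrative check (not in the original source): with mss_now = 1448, a
 * 3000 byte skb has pcount = 3 and 3000 < 3 * 1448 = 4344, so it ends in a
 * sub-mss segment and snd_sml is updated; a 2896 byte skb (exactly two
 * full segments) would not update it.
 */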
1668d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1669d4589926SEric Dumazet 				const struct sk_buff *skb)
1670d4589926SEric Dumazet {
1671d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1672d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1673d4589926SEric Dumazet }
1674d4589926SEric Dumazet 
1675d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1676d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1677d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1678d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1679d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1680d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1681d4589926SEric Dumazet  */
1682d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1683cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1684d4589926SEric Dumazet {
1685d4589926SEric Dumazet 	return partial &&
1686d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1687d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1688d4589926SEric Dumazet }
1689605ad7f1SEric Dumazet 
1690605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet,
1691605ad7f1SEric Dumazet  * to send one TSO packet per ms
1692605ad7f1SEric Dumazet  */
16931b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
16941b3878caSNeal Cardwell 		     int min_tso_segs)
1695605ad7f1SEric Dumazet {
1696605ad7f1SEric Dumazet 	u32 bytes, segs;
1697605ad7f1SEric Dumazet 
1698605ad7f1SEric Dumazet 	bytes = min(sk->sk_pacing_rate >> 10,
1699605ad7f1SEric Dumazet 		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1700605ad7f1SEric Dumazet 
1701605ad7f1SEric Dumazet 	/* Goal is to send at least one packet per ms,
1702605ad7f1SEric Dumazet 	 * not one big TSO packet every 100 ms.
1703605ad7f1SEric Dumazet 	 * This preserves ACK clocking and is consistent
1704605ad7f1SEric Dumazet 	 * with the tcp_tso_should_defer() heuristic.
1705605ad7f1SEric Dumazet 	 */
17061b3878caSNeal Cardwell 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1707605ad7f1SEric Dumazet 
1708605ad7f1SEric Dumazet 	return min_t(u32, segs, sk->sk_gso_max_segs);
1709605ad7f1SEric Dumazet }
17101b3878caSNeal Cardwell EXPORT_SYMBOL(tcp_tso_autosize);
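/* Illustrative sizing (not in the original source): sk_pacing_rate =
 * 62,500,000 bytes/sec (~500 Mbit/s) gives bytes = rate >> 10 = 61035,
 * roughly one millisecond worth of data; with mss_now = 1448 that is
 * 61035 / 1448 = 42 segments, subject to the min_tso_segs floor and the
 * sk_gso_max_segs cap.
 */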
1711605ad7f1SEric Dumazet 
1712ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting.
1713ed6e7268SNeal Cardwell  * See if congestion control module wants to decide; otherwise, autosize.
1714ed6e7268SNeal Cardwell  */
1715ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1716ed6e7268SNeal Cardwell {
1717ed6e7268SNeal Cardwell 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1718ed6e7268SNeal Cardwell 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
1719ed6e7268SNeal Cardwell 
17201b3878caSNeal Cardwell 	return tso_segs ? :
17211b3878caSNeal Cardwell 		tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
1722ed6e7268SNeal Cardwell }
1723ed6e7268SNeal Cardwell 
1724d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1725d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1726d4589926SEric Dumazet 					const struct sk_buff *skb,
1727d4589926SEric Dumazet 					unsigned int mss_now,
1728d4589926SEric Dumazet 					unsigned int max_segs,
1729d4589926SEric Dumazet 					int nonagle)
1730c1b4a7e6SDavid S. Miller {
1731cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1732d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1733c1b4a7e6SDavid S. Miller 
173490840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
17351485348dSBen Hutchings 	max_len = mss_now * max_segs;
17360e3a4803SIlpo Järvinen 
17371485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
17381485348dSBen Hutchings 		return max_len;
17390e3a4803SIlpo Järvinen 
17405ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
17415ea3a748SIlpo Järvinen 
17421485348dSBen Hutchings 	if (max_len <= needed)
17431485348dSBen Hutchings 		return max_len;
17440e3a4803SIlpo Järvinen 
1745d4589926SEric Dumazet 	partial = needed % mss_now;
1746d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1747d4589926SEric Dumazet 	 * to include this last segment in this skb.
1748d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at the last MSS boundary
1749d4589926SEric Dumazet 	 */
1750cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1751d4589926SEric Dumazet 		return needed - partial;
1752d4589926SEric Dumazet 
1753d4589926SEric Dumazet 	return needed;
1754c1b4a7e6SDavid S. Miller }
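/* Illustrative split point (not in the original source): with
 * mss_now = 1448, a 6000 byte tail skb, window = 10000 and max_len larger
 * than both, needed = 6000 and partial = 6000 % 1448 = 208; if Nagle
 * forbids the 208 byte tail we return 5792 (four full segments),
 * otherwise the full 6000.
 */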
1755c1b4a7e6SDavid S. Miller 
1756c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1757c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1758c1b4a7e6SDavid S. Miller  */
1759cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1760cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1761c1b4a7e6SDavid S. Miller {
1762d649a7a8SEric Dumazet 	u32 in_flight, cwnd, halfcwnd;
1763c1b4a7e6SDavid S. Miller 
1764c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
17654de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
17664de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1767c1b4a7e6SDavid S. Miller 		return 1;
1768c1b4a7e6SDavid S. Miller 
1769c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1770c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1771d649a7a8SEric Dumazet 	if (in_flight >= cwnd)
1772c1b4a7e6SDavid S. Miller 		return 0;
1773d649a7a8SEric Dumazet 
1774d649a7a8SEric Dumazet 	/* For better scheduling, ensure we have at least
1775d649a7a8SEric Dumazet 	 * 2 GSO packets in flight.
1776d649a7a8SEric Dumazet 	 */
1777d649a7a8SEric Dumazet 	halfcwnd = max(cwnd >> 1, 1U);
1778d649a7a8SEric Dumazet 	return min(halfcwnd, cwnd - in_flight);
1779c1b4a7e6SDavid S. Miller }
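/* Illustrative budget (not in the original source): snd_cwnd = 10 with
 * in_flight = 7 gives halfcwnd = 5 and cwnd - in_flight = 3, so at most
 * 3 segments may be sent now; once in_flight reaches cwnd the test
 * returns 0.
 */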
1780c1b4a7e6SDavid S. Miller 
1781b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
178267edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1783c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1784c1b4a7e6SDavid S. Miller  */
17855bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1786c1b4a7e6SDavid S. Miller {
1787c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1788c1b4a7e6SDavid S. Miller 
1789f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
17905bbb432cSEric Dumazet 		tcp_set_skb_tso_segs(skb, mss_now);
1791c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1792c1b4a7e6SDavid S. Miller 	}
1793c1b4a7e6SDavid S. Miller 	return tso_segs;
1794c1b4a7e6SDavid S. Miller }
1795c1b4a7e6SDavid S. Miller 
1796c1b4a7e6SDavid S. Miller 
1797a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1798c1b4a7e6SDavid S. Miller  * sent now.
1799c1b4a7e6SDavid S. Miller  */
1800a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1801c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1802c1b4a7e6SDavid S. Miller {
1803c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames that sit in the middle of the
1804c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1805c1b4a7e6SDavid S. Miller 	 *
1806c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1807c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1808c1b4a7e6SDavid S. Miller 	 */
1809c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1810a2a385d6SEric Dumazet 		return true;
1811c1b4a7e6SDavid S. Miller 
18129b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
18139b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1814a2a385d6SEric Dumazet 		return true;
1815c1b4a7e6SDavid S. Miller 
1816cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1817a2a385d6SEric Dumazet 		return true;
1818c1b4a7e6SDavid S. Miller 
1819a2a385d6SEric Dumazet 	return false;
1820c1b4a7e6SDavid S. Miller }
1821c1b4a7e6SDavid S. Miller 
1822c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1823a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1824a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1825056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1826c1b4a7e6SDavid S. Miller {
1827c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1828c1b4a7e6SDavid S. Miller 
1829c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1830c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1831c1b4a7e6SDavid S. Miller 
183290840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1833c1b4a7e6SDavid S. Miller }
1834c1b4a7e6SDavid S. Miller 
1835c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1836c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1837c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1838c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1839c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1840c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1841c1b4a7e6SDavid S. Miller  */
1842*75c119afSEric Dumazet static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1843*75c119afSEric Dumazet 			struct sk_buff *skb, unsigned int len,
1844c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1845c1b4a7e6SDavid S. Miller {
1846c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1847c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
18489ce01461SIlpo Järvinen 	u8 flags;
1849c1b4a7e6SDavid S. Miller 
1850c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1851c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1852*75c119afSEric Dumazet 		return tcp_fragment(sk, tcp_queue, skb, len, mss_now, gfp);
1853c1b4a7e6SDavid S. Miller 
1854eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
185551456b29SIan Morris 	if (unlikely(!buff))
1856c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1857c1b4a7e6SDavid S. Miller 
18583ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
18593ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1860b60b49eaSHerbert Xu 	buff->truesize += nlen;
1861c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1862c1b4a7e6SDavid S. Miller 
1863c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1864c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1865c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1866c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1867c1b4a7e6SDavid S. Miller 
1868c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
18694de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
18704de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
18714de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1872c1b4a7e6SDavid S. Miller 
1873c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1874c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1875c1b4a7e6SDavid S. Miller 
1876a166140eSMartin KaFai Lau 	tcp_skb_fragment_eor(skb, buff);
1877a166140eSMartin KaFai Lau 
187884fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1879c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1880490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1881c1b4a7e6SDavid S. Miller 
1882c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
18835bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(skb, mss_now);
18845bbb432cSEric Dumazet 	tcp_set_skb_tso_segs(buff, mss_now);
1885c1b4a7e6SDavid S. Miller 
1886c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1887f4a775d1SEric Dumazet 	__skb_header_release(buff);
1888*75c119afSEric Dumazet 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1889c1b4a7e6SDavid S. Miller 
1890c1b4a7e6SDavid S. Miller 	return 0;
1891c1b4a7e6SDavid S. Miller }
1892c1b4a7e6SDavid S. Miller 
1893c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1894c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1895c1b4a7e6SDavid S. Miller  *
1896c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1897c1b4a7e6SDavid S. Miller  */
1898ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1899605ad7f1SEric Dumazet 				 bool *is_cwnd_limited, u32 max_segs)
1900c1b4a7e6SDavid S. Miller {
19016687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
190250c8339eSEric Dumazet 	u32 age, send_win, cong_win, limit, in_flight;
190350c8339eSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
190450c8339eSEric Dumazet 	struct sk_buff *head;
1905ad9f4f50SEric Dumazet 	int win_divisor;
1906c1b4a7e6SDavid S. Miller 
19074de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1908ae8064acSJohn Heffner 		goto send_now;
1909c1b4a7e6SDavid S. Miller 
191099d7662aSEric Dumazet 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1911ae8064acSJohn Heffner 		goto send_now;
1912ae8064acSJohn Heffner 
19135f852eb5SEric Dumazet 	/* Avoid bursty behavior by allowing deferral
19145f852eb5SEric Dumazet 	 * only if the last write was recent.
19155f852eb5SEric Dumazet 	 */
1916d635fbe2SEric Dumazet 	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
1917ae8064acSJohn Heffner 		goto send_now;
1918908a75c1SDavid S. Miller 
1919c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1920c1b4a7e6SDavid S. Miller 
1921056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1922c1b4a7e6SDavid S. Miller 
192390840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1924c1b4a7e6SDavid S. Miller 
1925c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1926c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1927c1b4a7e6SDavid S. Miller 
1928c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1929c1b4a7e6SDavid S. Miller 
1930ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1931605ad7f1SEric Dumazet 	if (limit >= max_segs * tp->mss_cache)
1932ae8064acSJohn Heffner 		goto send_now;
1933ba244fe9SDavid S. Miller 
193462ad2761SIlpo Järvinen 	/* An skb in the middle of the queue won't get more data; fully sendable already? */
193562ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
193662ad2761SIlpo Järvinen 		goto send_now;
193762ad2761SIlpo Järvinen 
1938ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1939ad9f4f50SEric Dumazet 	if (win_divisor) {
1940c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1941c1b4a7e6SDavid S. Miller 
1942c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1943c1b4a7e6SDavid S. Miller 		 * just use it.
1944c1b4a7e6SDavid S. Miller 		 */
1945ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1946c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1947ae8064acSJohn Heffner 			goto send_now;
1948c1b4a7e6SDavid S. Miller 	} else {
1949c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1950c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1951c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1952c1b4a7e6SDavid S. Miller 		 * then send now.
1953c1b4a7e6SDavid S. Miller 		 */
19546b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1955ae8064acSJohn Heffner 			goto send_now;
1956c1b4a7e6SDavid S. Miller 	}
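	/* Illustrative numbers (not in the original source): with the default
	 * tcp_tso_win_divisor of 3 and min(snd_wnd, cwnd * mss) = 65536,
	 * chunk = 65536 / 3 = 21845 bytes, so deferral ends once limit
	 * reaches that amount.
	 */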
1957c1b4a7e6SDavid S. Miller 
1958*75c119afSEric Dumazet 	/* TODO : use tsorted_sent_queue ? */
1959*75c119afSEric Dumazet 	head = tcp_rtx_queue_head(sk);
1960*75c119afSEric Dumazet 	if (!head)
1961*75c119afSEric Dumazet 		goto send_now;
19629a568de4SEric Dumazet 	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
196350c8339eSEric Dumazet 	/* If next ACK is likely to come too late (half srtt), do not defer */
196450c8339eSEric Dumazet 	if (age < (tp->srtt_us >> 4))
196550c8339eSEric Dumazet 		goto send_now;
196650c8339eSEric Dumazet 
19675f852eb5SEric Dumazet 	/* Ok, it looks like it is advisable to defer. */
1968ae8064acSJohn Heffner 
1969d2e1339fSBendik Rønning Opstad 	if (cong_win < send_win && cong_win <= skb->len)
1970ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1971ca8a2263SNeal Cardwell 
1972a2a385d6SEric Dumazet 	return true;
1973ae8064acSJohn Heffner 
1974ae8064acSJohn Heffner send_now:
1975a2a385d6SEric Dumazet 	return false;
1976c1b4a7e6SDavid S. Miller }
1977c1b4a7e6SDavid S. Miller 
197805cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk)
197905cbc0dbSFan Du {
198005cbc0dbSFan Du 	struct inet_connection_sock *icsk = inet_csk(sk);
198105cbc0dbSFan Du 	struct tcp_sock *tp = tcp_sk(sk);
198205cbc0dbSFan Du 	struct net *net = sock_net(sk);
198305cbc0dbSFan Du 	u32 interval;
198405cbc0dbSFan Du 	s32 delta;
198505cbc0dbSFan Du 
198605cbc0dbSFan Du 	interval = net->ipv4.sysctl_tcp_probe_interval;
1987c74df29aSEric Dumazet 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
198805cbc0dbSFan Du 	if (unlikely(delta >= interval * HZ)) {
198905cbc0dbSFan Du 		int mss = tcp_current_mss(sk);
199005cbc0dbSFan Du 
199105cbc0dbSFan Du 		/* Update current search range */
199205cbc0dbSFan Du 		icsk->icsk_mtup.probe_size = 0;
199305cbc0dbSFan Du 		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
199405cbc0dbSFan Du 			sizeof(struct tcphdr) +
199505cbc0dbSFan Du 			icsk->icsk_af_ops->net_header_len;
199605cbc0dbSFan Du 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
199705cbc0dbSFan Du 
199805cbc0dbSFan Du 		/* Update probe time stamp */
1999c74df29aSEric Dumazet 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
200005cbc0dbSFan Du 	}
200105cbc0dbSFan Du }
200205cbc0dbSFan Du 
20035d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
200467edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
200567edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
200667edfef7SAndi Kleen  * changes resulting in larger path MTUs.
200767edfef7SAndi Kleen  *
20085d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
20095d424d5aSJohn Heffner  *         1 if a probe was sent,
2010056834d9SIlpo Järvinen  *         -1 otherwise
2011056834d9SIlpo Järvinen  */
20125d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20135d424d5aSJohn Heffner {
20145d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
201512a59abcSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
20165d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
20176b58e0a5SFan Du 	struct net *net = sock_net(sk);
20185d424d5aSJohn Heffner 	int probe_size;
201991cc17c0SIlpo Järvinen 	int size_needed;
202012a59abcSEric Dumazet 	int copy, len;
20215d424d5aSJohn Heffner 	int mss_now;
20226b58e0a5SFan Du 	int interval;
20235d424d5aSJohn Heffner 
20245d424d5aSJohn Heffner 	/* Not currently probing/verifying,
20255d424d5aSJohn Heffner 	 * not in recovery,
20265d424d5aSJohn Heffner 	 * have enough cwnd, and
202712a59abcSEric Dumazet 	 * not SACKing (the variable headers throw things off)
202812a59abcSEric Dumazet 	 */
202912a59abcSEric Dumazet 	if (likely(!icsk->icsk_mtup.enabled ||
20305d424d5aSJohn Heffner 		   icsk->icsk_mtup.probe_size ||
20315d424d5aSJohn Heffner 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20325d424d5aSJohn Heffner 		   tp->snd_cwnd < 11 ||
203312a59abcSEric Dumazet 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20345d424d5aSJohn Heffner 		return -1;
20355d424d5aSJohn Heffner 
20366b58e0a5SFan Du 	/* Use binary search for probe_size between tcp_mss_base
20376b58e0a5SFan Du 	 * and the current mss_clamp. If (search_high - search_low) is
20386b58e0a5SFan Du 	 * smaller than a threshold, back off from probing.
20396b58e0a5SFan Du 	 */
20400c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
20416b58e0a5SFan Du 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20426b58e0a5SFan Du 				    icsk->icsk_mtup.search_low) >> 1);
204391cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
20446b58e0a5SFan Du 	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
204505cbc0dbSFan Du 	/* When misfortune happens while we are actively reprobing,
204605cbc0dbSFan Du 	 * the reprobe timer may have expired. We stick with the current
204705cbc0dbSFan Du 	 * probing process by not resetting the search range to its original.
204805cbc0dbSFan Du 	 */
20496b58e0a5SFan Du 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
205005cbc0dbSFan Du 		interval < net->ipv4.sysctl_tcp_probe_threshold) {
205105cbc0dbSFan Du 		/* Check whether enough time has elapsed for
205205cbc0dbSFan Du 		 * another round of probing.
205305cbc0dbSFan Du 		 */
205405cbc0dbSFan Du 		tcp_mtu_check_reprobe(sk);
20555d424d5aSJohn Heffner 		return -1;
20565d424d5aSJohn Heffner 	}
20575d424d5aSJohn Heffner 
20585d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
20597f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
20605d424d5aSJohn Heffner 		return -1;
20615d424d5aSJohn Heffner 
206291cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
20635d424d5aSJohn Heffner 		return -1;
206490840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
20655d424d5aSJohn Heffner 		return 0;
20665d424d5aSJohn Heffner 
2067d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2068d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2069d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
20705d424d5aSJohn Heffner 			return -1;
20715d424d5aSJohn Heffner 		else
20725d424d5aSJohn Heffner 			return 0;
20735d424d5aSJohn Heffner 	}
20745d424d5aSJohn Heffner 
20755d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
2076eb934478SEric Dumazet 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
207751456b29SIan Morris 	if (!nskb)
20785d424d5aSJohn Heffner 		return -1;
20793ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
20803ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
20815d424d5aSJohn Heffner 
2082fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
20835d424d5aSJohn Heffner 
20845d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
20855d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
20864de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
20875d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
20885d424d5aSJohn Heffner 	nskb->csum = 0;
208984fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
20905d424d5aSJohn Heffner 
209150c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
209250c4817eSIlpo Järvinen 
20935d424d5aSJohn Heffner 	len = 0;
2094234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
20955d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
20962fe664f1SDouglas Caetano dos Santos 		if (nskb->ip_summed) {
20975d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
20982fe664f1SDouglas Caetano dos Santos 		} else {
20992fe664f1SDouglas Caetano dos Santos 			__wsum csum = skb_copy_and_csum_bits(skb, 0,
2100056834d9SIlpo Järvinen 							     skb_put(nskb, copy),
21012fe664f1SDouglas Caetano dos Santos 							     copy, 0);
21022fe664f1SDouglas Caetano dos Santos 			nskb->csum = csum_block_add(nskb->csum, csum, len);
21032fe664f1SDouglas Caetano dos Santos 		}
21045d424d5aSJohn Heffner 
21055d424d5aSJohn Heffner 		if (skb->len <= copy) {
21065d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
21075d424d5aSJohn Heffner 			 * Throw it away. */
21084de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2109fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21103ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
21115d424d5aSJohn Heffner 		} else {
21124de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2113a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
21145d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
21155d424d5aSJohn Heffner 				skb_pull(skb, copy);
211684fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
2117056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
2118056834d9SIlpo Järvinen 								 skb->len, 0);
21195d424d5aSJohn Heffner 			} else {
21205d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
21215bbb432cSEric Dumazet 				tcp_set_skb_tso_segs(skb, mss_now);
21225d424d5aSJohn Heffner 			}
21235d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
21245d424d5aSJohn Heffner 		}
21255d424d5aSJohn Heffner 
21265d424d5aSJohn Heffner 		len += copy;
2127234b6860SIlpo Järvinen 
2128234b6860SIlpo Järvinen 		if (len >= probe_size)
2129234b6860SIlpo Järvinen 			break;
21305d424d5aSJohn Heffner 	}
21315bbb432cSEric Dumazet 	tcp_init_tso_segs(nskb, nskb->len);
21325d424d5aSJohn Heffner 
21335d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
21347faee5c0SEric Dumazet 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
21357faee5c0SEric Dumazet 	 */
21365d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
21375d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
21385d424d5aSJohn Heffner 		 * effectively two packets. */
21395d424d5aSJohn Heffner 		tp->snd_cwnd--;
214066f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
21415d424d5aSJohn Heffner 
21425d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
21430e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
21440e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
21455d424d5aSJohn Heffner 
21465d424d5aSJohn Heffner 		return 1;
21475d424d5aSJohn Heffner 	}
21485d424d5aSJohn Heffner 
21495d424d5aSJohn Heffner 	return -1;
21505d424d5aSJohn Heffner }
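
/*
 * Illustrative sketch (not kernel code): how tcp_mtu_probe() picks the
 * probe size by binary search and when it backs off.  mtu_to_mss() is
 * a hypothetical stand-in for tcp_mtu_to_mss(); threshold plays the
 * role of net.ipv4.tcp_probe_threshold.
 */
static int pick_probe_size(int search_low, int search_high, int threshold,
			   int (*mtu_to_mss)(int mtu))
{
	/* Probe the midpoint of the current MTU search range */
	int probe_size = mtu_to_mss((search_high + search_low) >> 1);

	/* Back off when the range is too narrow, or the midpoint no
	 * longer fits under the high end of the range.
	 */
	if (probe_size > mtu_to_mss(search_high) ||
	    search_high - search_low < threshold)
		return -1;
	return probe_size;
}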
21515d424d5aSJohn Heffner 
2152218af599SEric Dumazet static bool tcp_pacing_check(const struct sock *sk)
2153218af599SEric Dumazet {
2154218af599SEric Dumazet 	return tcp_needs_internal_pacing(sk) &&
2155218af599SEric Dumazet 	       hrtimer_active(&tcp_sk(sk)->pacing_timer);
2156218af599SEric Dumazet }
2157218af599SEric Dumazet 
2158f9616c35SEric Dumazet /* TCP Small Queues :
2159f9616c35SEric Dumazet  * Limit the number of packets in qdisc/devices to two packets, or ~1 ms worth.
2160f9616c35SEric Dumazet  * (These limits are doubled for retransmits)
2161f9616c35SEric Dumazet  * This allows for :
2162f9616c35SEric Dumazet  *  - better RTT estimation and ACK scheduling
2163f9616c35SEric Dumazet  *  - faster recovery
2164f9616c35SEric Dumazet  *  - high rates
2165f9616c35SEric Dumazet  * Alas, some drivers / subsystems require a fair amount
2166f9616c35SEric Dumazet  * of queued bytes to ensure line rate.
2167f9616c35SEric Dumazet  * One example is wifi aggregation (802.11 AMPDU)
2168f9616c35SEric Dumazet  */
2169f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2170f9616c35SEric Dumazet 				  unsigned int factor)
2171f9616c35SEric Dumazet {
2172f9616c35SEric Dumazet 	unsigned int limit;
2173f9616c35SEric Dumazet 
2174f9616c35SEric Dumazet 	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
2175f9616c35SEric Dumazet 	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
2176f9616c35SEric Dumazet 	limit <<= factor;
2177f9616c35SEric Dumazet 
217814afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2179*75c119afSEric Dumazet 		/* Always send skb if rtx queue is empty.
218075eefc6cSEric Dumazet 		 * No need to wait for TX completion to call us back,
218175eefc6cSEric Dumazet 		 * after softirq/tasklet schedule.
218275eefc6cSEric Dumazet 		 * This helps when TX completions are delayed too much.
218375eefc6cSEric Dumazet 		 */
2184*75c119afSEric Dumazet 		if (tcp_rtx_queue_empty(sk))
218575eefc6cSEric Dumazet 			return false;
218675eefc6cSEric Dumazet 
21877aa5470cSEric Dumazet 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2188f9616c35SEric Dumazet 		/* It is possible TX completion already happened
2189f9616c35SEric Dumazet 		 * before we set TSQ_THROTTLED, so we must
2190f9616c35SEric Dumazet 		 * test again the condition.
2191f9616c35SEric Dumazet 		 */
2192f9616c35SEric Dumazet 		smp_mb__after_atomic();
219314afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2194f9616c35SEric Dumazet 			return true;
2195f9616c35SEric Dumazet 	}
2196f9616c35SEric Dumazet 	return false;
2197f9616c35SEric Dumazet }
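
/*
 * Illustrative sketch (not kernel code): the TSQ limit computed above.
 * sk_pacing_rate is in bytes per second, so rate >> 10 approximates
 * one millisecond worth of bytes; the result is clamped by
 * net.ipv4.tcp_limit_output_bytes and doubled for retransmits
 * (factor == 1).
 */
static unsigned int tsq_limit(unsigned int skb_truesize,
			      unsigned long pacing_rate, /* bytes/sec */
			      unsigned int sysctl_limit,
			      unsigned int factor)
{
	unsigned long limit = 2UL * skb_truesize;

	if (pacing_rate >> 10 > limit)
		limit = pacing_rate >> 10;	/* ~1 ms worth of bytes */
	if (limit > sysctl_limit)
		limit = sysctl_limit;
	return (unsigned int)(limit << factor);
}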
2198f9616c35SEric Dumazet 
219905b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
220005b055e8SFrancis Yan {
2201628174ccSEric Dumazet 	const u32 now = tcp_jiffies32;
2202efe967cdSArnd Bergmann 	enum tcp_chrono old = tp->chrono_type;
220305b055e8SFrancis Yan 
2204efe967cdSArnd Bergmann 	if (old > TCP_CHRONO_UNSPEC)
2205efe967cdSArnd Bergmann 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
220605b055e8SFrancis Yan 	tp->chrono_start = now;
220705b055e8SFrancis Yan 	tp->chrono_type = new;
220805b055e8SFrancis Yan }
220905b055e8SFrancis Yan 
221005b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
221105b055e8SFrancis Yan {
221205b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
221305b055e8SFrancis Yan 
221405b055e8SFrancis Yan 	/* If there are multiple conditions worthy of tracking in a
22150f87230dSFrancis Yan 	 * chronograph then the highest priority enum takes precedence
22160f87230dSFrancis Yan 	 * over the other conditions. So if something "more interesting"
221705b055e8SFrancis Yan 	 * starts happening, stop the previous chrono and start a new one.
221805b055e8SFrancis Yan 	 */
221905b055e8SFrancis Yan 	if (type > tp->chrono_type)
222005b055e8SFrancis Yan 		tcp_chrono_set(tp, type);
222105b055e8SFrancis Yan }
222205b055e8SFrancis Yan 
222305b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
222405b055e8SFrancis Yan {
222505b055e8SFrancis Yan 	struct tcp_sock *tp = tcp_sk(sk);
222605b055e8SFrancis Yan 
22280f87230dSFrancis Yan 	/* There are multiple conditions worthy of tracking in a
22290f87230dSFrancis Yan 	 * chronograph, so that the highest priority enum takes
22300f87230dSFrancis Yan 	 * precedence over the other conditions (see tcp_chrono_start).
22310f87230dSFrancis Yan 	 * If a condition stops, we only stop chrono tracking if
22320f87230dSFrancis Yan 	 * it's the "most interesting" or current chrono we are
22330f87230dSFrancis Yan 	 * tracking, and we start the busy chrono if we have pending data.
22340f87230dSFrancis Yan 	 */
2235*75c119afSEric Dumazet 	if (tcp_rtx_and_write_queues_empty(sk))
223605b055e8SFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
22370f87230dSFrancis Yan 	else if (type == tp->chrono_type)
22380f87230dSFrancis Yan 		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
223905b055e8SFrancis Yan }
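
/*
 * Illustrative sketch (not kernel code): the priority rule shared by
 * tcp_chrono_start() and tcp_chrono_stop().  The demo enum mirrors the
 * ordering of enum tcp_chrono (UNSPEC < BUSY < RWND_LIMITED <
 * SNDBUF_LIMITED); a higher-priority chrono preempts a lower one, and
 * stopping anything but the current chrono is a no-op.
 */
enum chrono_demo { DEMO_UNSPEC, DEMO_BUSY, DEMO_RWND, DEMO_SNDBUF };

static enum chrono_demo chrono_demo_start(enum chrono_demo cur,
					  enum chrono_demo type)
{
	return type > cur ? type : cur;	/* "more interesting" wins */
}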
224005b055e8SFrancis Yan 
22411da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
22421da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
22431da177e4SLinus Torvalds  * window for us.
22441da177e4SLinus Torvalds  *
2245f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2246f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2247f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
2248f8269a49SIlpo Järvinen  *
22496ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
22506ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
22516ba8a3b1SNandita Dukkipati  *
2252a2a385d6SEric Dumazet  * Returns true, if no segments are in flight and we have queued segments,
2253a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
22541da177e4SLinus Torvalds  */
2255a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2256d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
225992df7b51SDavid S. Miller 	struct sk_buff *skb;
2260c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
2261c1b4a7e6SDavid S. Miller 	int cwnd_quota;
22625d424d5aSJohn Heffner 	int result;
22635615f886SFrancis Yan 	bool is_cwnd_limited = false, is_rwnd_limited = false;
2264605ad7f1SEric Dumazet 	u32 max_segs;
22651da177e4SLinus Torvalds 
2266c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
22675d424d5aSJohn Heffner 
2268d5dd9175SIlpo Järvinen 	if (!push_one) {
22695d424d5aSJohn Heffner 		/* Do MTU probing. */
2270d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
2271d5dd9175SIlpo Järvinen 		if (!result) {
2272a2a385d6SEric Dumazet 			return false;
22735d424d5aSJohn Heffner 		} else if (result > 0) {
22745d424d5aSJohn Heffner 			sent_pkts = 1;
22755d424d5aSJohn Heffner 		}
2276d5dd9175SIlpo Järvinen 	}
22775d424d5aSJohn Heffner 
2278ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, mss_now);
22799a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
2280fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
2281c8ac3774SHerbert Xu 		unsigned int limit;
2282c8ac3774SHerbert Xu 
2283218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2284218af599SEric Dumazet 			break;
2285218af599SEric Dumazet 
22865bbb432cSEric Dumazet 		tso_segs = tcp_init_tso_segs(skb, mss_now);
2287c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
2288c1b4a7e6SDavid S. Miller 
22899d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
22907faee5c0SEric Dumazet 			/* "skb_mstamp" is used as a start point for the retransmit timer */
2291e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
2292ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
22939d186cacSAndrey Vagin 		}
2294ec342325SAndrew Vagin 
2295b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
22966ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
22976ba8a3b1SNandita Dukkipati 			if (push_one == 2)
22986ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
22996ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
23006ba8a3b1SNandita Dukkipati 			else
2301b68e9f85SHerbert Xu 				break;
23026ba8a3b1SNandita Dukkipati 		}
2303b68e9f85SHerbert Xu 
23045615f886SFrancis Yan 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
23055615f886SFrancis Yan 			is_rwnd_limited = true;
2306b68e9f85SHerbert Xu 			break;
23075615f886SFrancis Yan 		}
2308b68e9f85SHerbert Xu 
2309d6a4e26aSEric Dumazet 		if (tso_segs == 1) {
2310aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2311aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
2312aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
2313aa93466bSDavid S. Miller 				break;
2314c1b4a7e6SDavid S. Miller 		} else {
2315ca8a2263SNeal Cardwell 			if (!push_one &&
2316605ad7f1SEric Dumazet 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2317605ad7f1SEric Dumazet 						 max_segs))
2318aa93466bSDavid S. Miller 				break;
2319c1b4a7e6SDavid S. Miller 		}
2320aa93466bSDavid S. Miller 
2321605ad7f1SEric Dumazet 		limit = mss_now;
2322d6a4e26aSEric Dumazet 		if (tso_segs > 1 && !tcp_urg_mode(tp))
2323605ad7f1SEric Dumazet 			limit = tcp_mss_split_point(sk, skb, mss_now,
2324605ad7f1SEric Dumazet 						    min_t(unsigned int,
2325605ad7f1SEric Dumazet 							  cwnd_quota,
2326605ad7f1SEric Dumazet 							  max_segs),
2327605ad7f1SEric Dumazet 						    nonagle);
2328605ad7f1SEric Dumazet 
2329605ad7f1SEric Dumazet 		if (skb->len > limit &&
2330*75c119afSEric Dumazet 		    unlikely(tso_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
2331*75c119afSEric Dumazet 					  skb, limit, mss_now, gfp)))
2332605ad7f1SEric Dumazet 			break;
2333605ad7f1SEric Dumazet 
23347aa5470cSEric Dumazet 		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
23357aa5470cSEric Dumazet 			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
2336f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 0))
233746d3ceabSEric Dumazet 			break;
2338c9eeec26SEric Dumazet 
2339d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
23401da177e4SLinus Torvalds 			break;
23411da177e4SLinus Torvalds 
2342ec342325SAndrew Vagin repair:
23431da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
23441da177e4SLinus Torvalds 		 * This call will increment packets_out.
23451da177e4SLinus Torvalds 		 */
234666f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
23471da177e4SLinus Torvalds 
23481da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2349a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2350d5dd9175SIlpo Järvinen 
2351d5dd9175SIlpo Järvinen 		if (push_one)
2352d5dd9175SIlpo Järvinen 			break;
23531da177e4SLinus Torvalds 	}
23541da177e4SLinus Torvalds 
23555615f886SFrancis Yan 	if (is_rwnd_limited)
23565615f886SFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
23575615f886SFrancis Yan 	else
23585615f886SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
23595615f886SFrancis Yan 
2360aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2361684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2362684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
23636ba8a3b1SNandita Dukkipati 
23646ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
23656ba8a3b1SNandita Dukkipati 		if (push_one != 2)
23666ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
2367d2e1339fSBendik Rønning Opstad 		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2368ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2369a2a385d6SEric Dumazet 		return false;
23701da177e4SLinus Torvalds 	}
2371*75c119afSEric Dumazet 	return !tp->packets_out && !tcp_write_queue_empty(sk);
23726ba8a3b1SNandita Dukkipati }
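
/*
 * Illustrative sketch (not kernel code): the per-skb byte limit used
 * in the loop above, simplified.  A non-TSO skb may carry one MSS; a
 * TSO skb may carry up to min(cwnd_quota, max_segs) segments.  The
 * real tcp_mss_split_point() additionally respects the receive window
 * edge and Nagle.
 */
static unsigned int xmit_byte_limit(unsigned int mss_now,
				    unsigned int tso_segs,
				    unsigned int cwnd_quota,
				    unsigned int max_segs)
{
	unsigned int segs;

	if (tso_segs <= 1)
		return mss_now;
	segs = cwnd_quota < max_segs ? cwnd_quota : max_segs;
	return segs * mss_now;
}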
23736ba8a3b1SNandita Dukkipati 
23746ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
23756ba8a3b1SNandita Dukkipati {
23766ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
23776ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
2378a2815817SNeal Cardwell 	u32 timeout, rto_delta_us;
23796ba8a3b1SNandita Dukkipati 
23806ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
23816ba8a3b1SNandita Dukkipati 	 * finishes.
23826ba8a3b1SNandita Dukkipati 	 */
2383f9b99582SYuchung Cheng 	if (tp->fastopen_rsk)
23846ba8a3b1SNandita Dukkipati 		return false;
23856ba8a3b1SNandita Dukkipati 
23866ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
23876ba8a3b1SNandita Dukkipati 	 * in Open state, that are either limited by cwnd or application.
23886ba8a3b1SNandita Dukkipati 	 */
2389bec41a11SYuchung Cheng 	if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) ||
2390bec41a11SYuchung Cheng 	    !tp->packets_out || !tcp_is_sack(tp) ||
2391bec41a11SYuchung Cheng 	    icsk->icsk_ca_state != TCP_CA_Open)
23926ba8a3b1SNandita Dukkipati 		return false;
23936ba8a3b1SNandita Dukkipati 
23946ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
2395*75c119afSEric Dumazet 	     !tcp_write_queue_empty(sk))
23966ba8a3b1SNandita Dukkipati 		return false;
23976ba8a3b1SNandita Dukkipati 
2398bb4d991aSYuchung Cheng 	/* Probe timeout is 2*rtt. Add minimum RTO to account
2399f9b99582SYuchung Cheng 	 * for delayed ack when there's one outstanding packet. If no RTT
2400f9b99582SYuchung Cheng 	 * sample is available then probe after TCP_TIMEOUT_INIT.
24016ba8a3b1SNandita Dukkipati 	 */
2402bb4d991aSYuchung Cheng 	if (tp->srtt_us) {
2403bb4d991aSYuchung Cheng 		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
24046ba8a3b1SNandita Dukkipati 		if (tp->packets_out == 1)
2405bb4d991aSYuchung Cheng 			timeout += TCP_RTO_MIN;
2406bb4d991aSYuchung Cheng 		else
2407bb4d991aSYuchung Cheng 			timeout += TCP_TIMEOUT_MIN;
2408bb4d991aSYuchung Cheng 	} else {
2409bb4d991aSYuchung Cheng 		timeout = TCP_TIMEOUT_INIT;
2410bb4d991aSYuchung Cheng 	}
24116ba8a3b1SNandita Dukkipati 
2412a2815817SNeal Cardwell 	/* If the RTO formula yields an earlier time, then use that time. */
2413a2815817SNeal Cardwell 	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2414a2815817SNeal Cardwell 	if (rto_delta_us > 0)
2415a2815817SNeal Cardwell 		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
24166ba8a3b1SNandita Dukkipati 
24176ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
24186ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
24196ba8a3b1SNandita Dukkipati 	return true;
24206ba8a3b1SNandita Dukkipati }
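
/*
 * Illustrative sketch (not kernel code): the probe timeout (PTO)
 * computed above.  tp->srtt_us stores SRTT << 3, so srtt_us >> 2 is
 * 2*SRTT.  The constants model TCP_RTO_MIN (HZ/5), TCP_TIMEOUT_MIN
 * (2 jiffies) and TCP_TIMEOUT_INIT (1*HZ) in microseconds, assuming
 * HZ=1000.
 */
static unsigned int pto_usecs(unsigned int srtt_us, unsigned int packets_out)
{
	const unsigned int rto_min_us = 200 * 1000;	/* 200 ms */
	const unsigned int tmo_min_us = 2 * 1000;	/*   2 ms */
	const unsigned int tmo_init_us = 1000 * 1000;	/*    1 s */

	if (!srtt_us)
		return tmo_init_us;	/* no RTT sample yet */
	return (srtt_us >> 2) +
	       (packets_out == 1 ? rto_min_us : tmo_min_us);
}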
24216ba8a3b1SNandita Dukkipati 
24221f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
24231f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
24241f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
24251f3279aeSEric Dumazet  */
24261f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24271f3279aeSEric Dumazet 				    const struct sk_buff *skb)
24281f3279aeSEric Dumazet {
242939bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2430c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
24311f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24321f3279aeSEric Dumazet 		return true;
24331f3279aeSEric Dumazet 	}
24341f3279aeSEric Dumazet 	return false;
24351f3279aeSEric Dumazet }
24361f3279aeSEric Dumazet 
2437b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
24386ba8a3b1SNandita Dukkipati  * retransmit the last segment.
24396ba8a3b1SNandita Dukkipati  */
24406ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
24416ba8a3b1SNandita Dukkipati {
24429b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
24436ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
24446ba8a3b1SNandita Dukkipati 	int pcount;
24456ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
24466ba8a3b1SNandita Dukkipati 
2447b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
2448*75c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2449b340b264SYuchung Cheng 		pcount = tp->packets_out;
2450b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2451b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2452b340b264SYuchung Cheng 			goto probe_sent;
24536ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24546ba8a3b1SNandita Dukkipati 	}
2455*75c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
24566ba8a3b1SNandita Dukkipati 
24579b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
24589b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
24599b717a8dSNandita Dukkipati 		goto rearm_timer;
24609b717a8dSNandita Dukkipati 
24616ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
24626ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
24636ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24646ba8a3b1SNandita Dukkipati 
24651f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
24661f3279aeSEric Dumazet 		goto rearm_timer;
24671f3279aeSEric Dumazet 
24686ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
24696ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
24706ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24716ba8a3b1SNandita Dukkipati 
24726ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2473*75c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2474*75c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
24756cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
24766ba8a3b1SNandita Dukkipati 			goto rearm_timer;
2477*75c119afSEric Dumazet 		skb = skb_rb_next(skb);
24786ba8a3b1SNandita Dukkipati 	}
24796ba8a3b1SNandita Dukkipati 
24806ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
24816ba8a3b1SNandita Dukkipati 		goto rearm_timer;
24826ba8a3b1SNandita Dukkipati 
248310d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2484b340b264SYuchung Cheng 		goto rearm_timer;
24856ba8a3b1SNandita Dukkipati 
24869b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
24879b717a8dSNandita Dukkipati 	tp->tlp_high_seq = tp->snd_nxt;
24889b717a8dSNandita Dukkipati 
2489b340b264SYuchung Cheng probe_sent:
2490c10d9310SEric Dumazet 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2491fcd16c0aSYuchung Cheng 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2492fcd16c0aSYuchung Cheng 	inet_csk(sk)->icsk_pending = 0;
2493b340b264SYuchung Cheng rearm_timer:
2494fcd16c0aSYuchung Cheng 	tcp_rearm_rto(sk);
24951da177e4SLinus Torvalds }
24961da177e4SLinus Torvalds 
2497a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2498a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2499a762a980SDavid S. Miller  * The socket must be locked by the caller.
2500a762a980SDavid S. Miller  */
25019e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
25029e412ba7SIlpo Järvinen 			       int nonagle)
2503a762a980SDavid S. Miller {
2504726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2505726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2506726e07a8SIlpo Järvinen 	 * all will be happy.
2507726e07a8SIlpo Järvinen 	 */
2508726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2509726e07a8SIlpo Järvinen 		return;
2510726e07a8SIlpo Järvinen 
251199a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
25127450aaf6SEric Dumazet 			   sk_gfp_mask(sk, GFP_ATOMIC)))
25139e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2514a762a980SDavid S. Miller }
2515a762a980SDavid S. Miller 
2516c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
2517c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
2518c1b4a7e6SDavid S. Miller  */
2519c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2520c1b4a7e6SDavid S. Miller {
2521fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2522c1b4a7e6SDavid S. Miller 
2523c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2524c1b4a7e6SDavid S. Miller 
2525d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2526c1b4a7e6SDavid S. Miller }
2527c1b4a7e6SDavid S. Miller 
25281da177e4SLinus Torvalds /* This function returns the amount that we can raise the
25291da177e4SLinus Torvalds  * usable window based on the following constraints
25301da177e4SLinus Torvalds  *
25311da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
25321da177e4SLinus Torvalds  * 2. We limit memory per socket
25331da177e4SLinus Torvalds  *
25341da177e4SLinus Torvalds  * RFC 1122:
25351da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
25361da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
25371da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
25381da177e4SLinus Torvalds  *
25391da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
25401da177e4SLinus Torvalds  * it at least MSS bytes.
25411da177e4SLinus Torvalds  *
25421da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
25431da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
25441da177e4SLinus Torvalds  *
25451da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
25461da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
25471da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
25481da177e4SLinus Torvalds  * window to always advance by a single byte.
25491da177e4SLinus Torvalds  *
25501da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
25511da177e4SLinus Torvalds  * then this will not be a problem.
25521da177e4SLinus Torvalds  *
25531da177e4SLinus Torvalds  * BSD seems to make the following compromise:
25541da177e4SLinus Torvalds  *
25551da177e4SLinus Torvalds  *	If the free space is less than 1/4 of the maximum
25561da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
25571da177e4SLinus Torvalds  *	then set the window to 0.
25581da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
25591da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
25601da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
25611da177e4SLinus Torvalds  *
25621da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
25631da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
25641da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
25651da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
25661da177e4SLinus Torvalds  * because the pipeline is full.
25671da177e4SLinus Torvalds  *
25681da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
25691da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
25701da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
25711da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
25721da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
25731da177e4SLinus Torvalds  *
25741da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
25751da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
25761da177e4SLinus Torvalds  *
25771da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
25781da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
25791da177e4SLinus Torvalds  */
25801da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
25811da177e4SLinus Torvalds {
2582463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
25831da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2584caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
25851da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
25861da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
25871da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
25881da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
25891da177e4SLinus Torvalds 	 */
2590463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
25911da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
259286c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
259386c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
25941da177e4SLinus Torvalds 	int window;
25951da177e4SLinus Torvalds 
259606425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
25971da177e4SLinus Torvalds 		mss = full_space;
259806425c30SEric Dumazet 		if (mss <= 0)
259906425c30SEric Dumazet 			return 0;
260006425c30SEric Dumazet 	}
2601b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2602463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
26031da177e4SLinus Torvalds 
2604b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2605056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2606056834d9SIlpo Järvinen 					       4U * tp->advmss);
26071da177e4SLinus Torvalds 
260886c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
260986c1a045SFlorian Westphal 		 * increase it due to wscale.
261086c1a045SFlorian Westphal 		 */
261186c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
261286c1a045SFlorian Westphal 
261386c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
261486c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
261586c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
261686c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
261786c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
261886c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
261986c1a045SFlorian Westphal 		 */
262086c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
26211da177e4SLinus Torvalds 			return 0;
26221da177e4SLinus Torvalds 	}
26231da177e4SLinus Torvalds 
26241da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
26251da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
26261da177e4SLinus Torvalds 
26271da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
26281da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
26291da177e4SLinus Torvalds 	 */
26301da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
26311da177e4SLinus Torvalds 		window = free_space;
26321da177e4SLinus Torvalds 
26331da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
26341da177e4SLinus Torvalds 		 * Import case: prevent zero window announcement if
26351da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
26361da177e4SLinus Torvalds 		 */
26371935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
26381da177e4SLinus Torvalds 	} else {
26391935299dSGao Feng 		window = tp->rcv_wnd;
26401da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
26411da177e4SLinus Torvalds 		 * Window clamp already applied above.
26421da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
26431da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
26441da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
26451da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
26461da177e4SLinus Torvalds 		 * is too small.
26471da177e4SLinus Torvalds 		 */
26481da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
26491935299dSGao Feng 			window = rounddown(free_space, mss);
265084565070SJohn Heffner 		else if (mss == full_space &&
2651b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
265284565070SJohn Heffner 			window = free_space;
26531da177e4SLinus Torvalds 	}
26541da177e4SLinus Torvalds 
26551da177e4SLinus Torvalds 	return window;
26561da177e4SLinus Torvalds }
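
/*
 * Illustrative sketch (not kernel code): the no-wscale rounding branch
 * above, simplified (the full-space special case is omitted).  Keep
 * the current offer if it is within one MSS of the free space,
 * otherwise round the free space down to a multiple of the MSS.
 */
static int pick_window(int window, int free_space, int mss)
{
	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;	/* rounddown() */
	return window;
}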
26571da177e4SLinus Torvalds 
2658cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2659082ac2d5SMartin KaFai Lau 			     const struct sk_buff *next_skb)
2660082ac2d5SMartin KaFai Lau {
26610a2cf20cSSoheil Hassas Yeganeh 	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
26620a2cf20cSSoheil Hassas Yeganeh 		const struct skb_shared_info *next_shinfo =
26630a2cf20cSSoheil Hassas Yeganeh 			skb_shinfo(next_skb);
2664082ac2d5SMartin KaFai Lau 		struct skb_shared_info *shinfo = skb_shinfo(skb);
2665082ac2d5SMartin KaFai Lau 
26660a2cf20cSSoheil Hassas Yeganeh 		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2667082ac2d5SMartin KaFai Lau 		shinfo->tskey = next_shinfo->tskey;
26682de8023eSMartin KaFai Lau 		TCP_SKB_CB(skb)->txstamp_ack |=
26692de8023eSMartin KaFai Lau 			TCP_SKB_CB(next_skb)->txstamp_ack;
2670082ac2d5SMartin KaFai Lau 	}
2671082ac2d5SMartin KaFai Lau }
2672082ac2d5SMartin KaFai Lau 
26734a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
2674f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
26751da177e4SLinus Torvalds {
26761da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2677*75c119afSEric Dumazet 	struct sk_buff *next_skb = skb_rb_next(skb);
2678058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
26791da177e4SLinus Torvalds 
2680058dc334SIlpo Järvinen 	skb_size = skb->len;
2681058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
26821da177e4SLinus Torvalds 
2683058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
26841da177e4SLinus Torvalds 
2685f8071cdeSEric Dumazet 	if (next_skb_size) {
2686f8071cdeSEric Dumazet 		if (next_skb_size <= skb_availroom(skb))
2687f8071cdeSEric Dumazet 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2688f8071cdeSEric Dumazet 				      next_skb_size);
2689f8071cdeSEric Dumazet 		else if (!skb_shift(skb, next_skb, next_skb_size))
2690f8071cdeSEric Dumazet 			return false;
2691f8071cdeSEric Dumazet 	}
26926859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2693a6963a6bSIlpo Järvinen 
269452d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
269552d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
26961da177e4SLinus Torvalds 
269784fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
26981da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
26991da177e4SLinus Torvalds 
27001da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
27011da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
27021da177e4SLinus Torvalds 
2703e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
27044de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27051da177e4SLinus Torvalds 
27061da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
27071da177e4SLinus Torvalds 	 * packet counting does not break.
27081da177e4SLinus Torvalds 	 */
27094828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2710a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2711b7689205SIlpo Järvinen 
2712b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2713ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2714ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2715ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2716b7689205SIlpo Järvinen 
2717797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2718797108d1SIlpo Järvinen 
2719082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2720082ac2d5SMartin KaFai Lau 
2721*75c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2722f8071cdeSEric Dumazet 	return true;
27231da177e4SLinus Torvalds }
27241da177e4SLinus Torvalds 
272567edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2726a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27274a17fc3aSIlpo Järvinen {
27284a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2729a2a385d6SEric Dumazet 		return false;
27304a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2731a2a385d6SEric Dumazet 		return false;
27322331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
27334a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2734a2a385d6SEric Dumazet 		return false;
27354a17fc3aSIlpo Järvinen 
2736a2a385d6SEric Dumazet 	return true;
27374a17fc3aSIlpo Järvinen }
27384a17fc3aSIlpo Järvinen 
273967edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
274067edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
274167edfef7SAndi Kleen  */
27424a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
27434a17fc3aSIlpo Järvinen 				     int space)
27444a17fc3aSIlpo Järvinen {
27454a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
27464a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2747a2a385d6SEric Dumazet 	bool first = true;
27484a17fc3aSIlpo Järvinen 
27494a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
27504a17fc3aSIlpo Järvinen 		return;
27514de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
27524a17fc3aSIlpo Järvinen 		return;
27534a17fc3aSIlpo Järvinen 
2754*75c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
27554a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
27564a17fc3aSIlpo Järvinen 			break;
27574a17fc3aSIlpo Järvinen 
2758a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2759a643b5d4SMartin KaFai Lau 			break;
2760a643b5d4SMartin KaFai Lau 
27614a17fc3aSIlpo Järvinen 		space -= skb->len;
27624a17fc3aSIlpo Järvinen 
27634a17fc3aSIlpo Järvinen 		if (first) {
2764a2a385d6SEric Dumazet 			first = false;
27654a17fc3aSIlpo Järvinen 			continue;
27664a17fc3aSIlpo Järvinen 		}
27674a17fc3aSIlpo Järvinen 
27684a17fc3aSIlpo Järvinen 		if (space < 0)
27694a17fc3aSIlpo Järvinen 			break;
27704a17fc3aSIlpo Järvinen 
27714a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
27724a17fc3aSIlpo Järvinen 			break;
27734a17fc3aSIlpo Järvinen 
2774f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2775f8071cdeSEric Dumazet 			break;
27764a17fc3aSIlpo Järvinen 	}
27774a17fc3aSIlpo Järvinen }
27784a17fc3aSIlpo Järvinen 
27791da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
27801da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
27811da177e4SLinus Torvalds  * error occurred which prevented the send.
27821da177e4SLinus Torvalds  */
278310d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
27841da177e4SLinus Torvalds {
27855d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
278610d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
27877d227cd2SSridhar Samudrala 	unsigned int cur_mss;
278810d3be56SEric Dumazet 	int diff, len, err;
27891da177e4SLinus Torvalds 
279110d3be56SEric Dumazet 	/* Inconclusive MTU probe */
279210d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
27935d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
27945d424d5aSJohn Heffner 
27951da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2796caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
27971da177e4SLinus Torvalds 	 */
279814afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2799ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2800ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28011da177e4SLinus Torvalds 		return -EAGAIN;
28021da177e4SLinus Torvalds 
28031f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
28041f3279aeSEric Dumazet 		return -EBUSY;
28051f3279aeSEric Dumazet 
28061da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
28071da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
28081da177e4SLinus Torvalds 			BUG();
28091da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
28101da177e4SLinus Torvalds 			return -ENOMEM;
28111da177e4SLinus Torvalds 	}
28121da177e4SLinus Torvalds 
28137d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
28147d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
28157d227cd2SSridhar Samudrala 
28160c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
28177d227cd2SSridhar Samudrala 
28181da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
28191da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
28201da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
28211da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
28221da177e4SLinus Torvalds 	 */
28239d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
28249d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
28251da177e4SLinus Torvalds 		return -EAGAIN;
28261da177e4SLinus Torvalds 
282710d3be56SEric Dumazet 	len = cur_mss * segs;
282810d3be56SEric Dumazet 	if (skb->len > len) {
2829*75c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
2830*75c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
28311da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
283202276f3cSIlpo Järvinen 	} else {
2833c52e2421SEric Dumazet 		if (skb_unclone(skb, GFP_ATOMIC))
2834c52e2421SEric Dumazet 			return -ENOMEM;
283510d3be56SEric Dumazet 
283610d3be56SEric Dumazet 		diff = tcp_skb_pcount(skb);
283710d3be56SEric Dumazet 		tcp_set_skb_tso_segs(skb, cur_mss);
283810d3be56SEric Dumazet 		diff -= tcp_skb_pcount(skb);
283910d3be56SEric Dumazet 		if (diff)
284010d3be56SEric Dumazet 			tcp_adjust_pcount(sk, skb, diff);
284110d3be56SEric Dumazet 		if (skb->len < cur_mss)
284210d3be56SEric Dumazet 			tcp_retrans_try_collapse(sk, skb, cur_mss);
28431da177e4SLinus Torvalds 	}
28441da177e4SLinus Torvalds 
284549213555SDaniel Borkmann 	/* RFC3168, section 6.1.1.1. ECN fallback */
284649213555SDaniel Borkmann 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
284749213555SDaniel Borkmann 		tcp_ecn_clear_syn(sk, skb);
284849213555SDaniel Borkmann 
2849678550c6SYuchung Cheng 	/* Update global and local TCP statistics. */
2850678550c6SYuchung Cheng 	segs = tcp_skb_pcount(skb);
2851678550c6SYuchung Cheng 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2852678550c6SYuchung Cheng 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2853678550c6SYuchung Cheng 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2854678550c6SYuchung Cheng 	tp->total_retrans += segs;
2855678550c6SYuchung Cheng 
285650bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
285750bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
285850bceae9SThomas Graf 	 * beyond what csum_start can cover.
285950bceae9SThomas Graf 	 */
286050bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
286150bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
286210a81980SEric Dumazet 		struct sk_buff *nskb;
286310a81980SEric Dumazet 
2864e2080072SEric Dumazet 		tcp_skb_tsorted_save(skb) {
286510a81980SEric Dumazet 			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2866c84a5711SYuchung Cheng 			err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2867117632e6SEric Dumazet 				     -ENOBUFS;
2868e2080072SEric Dumazet 		} tcp_skb_tsorted_restore(skb);
2869e2080072SEric Dumazet 
28708c72c65bSEric Dumazet 		if (!err)
2871e2080072SEric Dumazet 			tcp_update_skb_after_send(tp, skb);
2872117632e6SEric Dumazet 	} else {
2873c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2874117632e6SEric Dumazet 	}
2875c84a5711SYuchung Cheng 
2876fc9f3501SEric Dumazet 	if (likely(!err)) {
2877c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2878678550c6SYuchung Cheng 	} else if (err != -EBUSY) {
2879678550c6SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2880fc9f3501SEric Dumazet 	}
2881c84a5711SYuchung Cheng 	return err;
288293b174adSYuchung Cheng }
288393b174adSYuchung Cheng 
288410d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
288593b174adSYuchung Cheng {
288693b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
288710d3be56SEric Dumazet 	int err = __tcp_retransmit_skb(sk, skb, segs);
28881da177e4SLinus Torvalds 
28891da177e4SLinus Torvalds 	if (err == 0) {
28901da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
28911da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2892e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
28931da177e4SLinus Torvalds 		}
28941da177e4SLinus Torvalds #endif
28951da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
28961da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
28971da177e4SLinus Torvalds 
28981da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
28991da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
29007faee5c0SEric Dumazet 			tp->retrans_stamp = tcp_skb_timestamp(skb);
29011da177e4SLinus Torvalds 
29021da177e4SLinus Torvalds 	}
29036e08d5e3SYuchung Cheng 
29046e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
29056e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
29066e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
29071da177e4SLinus Torvalds 	return err;
29081da177e4SLinus Torvalds }
29091da177e4SLinus Torvalds 
29101da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29111da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
29121da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
29131da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
29141da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
29151da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
29161da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
29171da177e4SLinus Torvalds  */
29181da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29191da177e4SLinus Torvalds {
29206687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
2921*75c119afSEric Dumazet 	struct sk_buff *skb, *rtx_head = NULL, *hole = NULL;
29221da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2923840a3cbeSYuchung Cheng 	u32 max_segs;
292461eb55f4SIlpo Järvinen 	int mib_idx;
29256a438bbeSStephen Hemminger 
292645e77d31SIlpo Järvinen 	if (!tp->packets_out)
292745e77d31SIlpo Järvinen 		return;
292845e77d31SIlpo Järvinen 
29296a438bbeSStephen Hemminger 	skb = tp->retransmit_skb_hint;
2930*75c119afSEric Dumazet 	if (!skb) {
2931*75c119afSEric Dumazet 		rtx_head = tcp_rtx_queue_head(sk);
2932*75c119afSEric Dumazet 		skb = rtx_head;
2933618d9f25SIlpo Järvinen 	}
2934ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
2935*75c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
2936dca0aaf8SEric Dumazet 		__u8 sacked;
293710d3be56SEric Dumazet 		int segs;
29381da177e4SLinus Torvalds 
2939218af599SEric Dumazet 		if (tcp_pacing_check(sk))
2940218af599SEric Dumazet 			break;
2941218af599SEric Dumazet 
29426a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
294351456b29SIan Morris 		if (!hole)
29446a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
29456a438bbeSStephen Hemminger 
294610d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
294710d3be56SEric Dumazet 		if (segs <= 0)
29481da177e4SLinus Torvalds 			return;
2949dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
2950a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
2951a3d2e9f8SEric Dumazet 		 * we need to make sure we are not sending overly big TSO packets
2952a3d2e9f8SEric Dumazet 		 */
2953a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
29540e1c54c2SIlpo Järvinen 
2955840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
2956006f582cSIlpo Järvinen 			break;
29570e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
295851456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
29590e1c54c2SIlpo Järvinen 				hole = skb;
296061eb55f4SIlpo Järvinen 			continue;
29611da177e4SLinus Torvalds 
29620e1c54c2SIlpo Järvinen 		} else {
29630e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
29640e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
29650e1c54c2SIlpo Järvinen 			else
29660e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
29670e1c54c2SIlpo Järvinen 		}
29680e1c54c2SIlpo Järvinen 
29690e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
297061eb55f4SIlpo Järvinen 			continue;
297140b215e5SPavel Emelyanov 
2972f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
2973f9616c35SEric Dumazet 			return;
2974f9616c35SEric Dumazet 
297510d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
29761da177e4SLinus Torvalds 			return;
297724ab6becSYuchung Cheng 
2978de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
29791da177e4SLinus Torvalds 
2980684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2981a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2982a262f0cdSNandita Dukkipati 
2983*75c119afSEric Dumazet 		if (skb == rtx_head &&
298457dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
2985463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
29863f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
29873f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
29881da177e4SLinus Torvalds 	}
29891da177e4SLinus Torvalds }
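
/*
 * Illustrative sketch (not kernel code): the per-skb segment budget in
 * the retransmit loop above: whatever cwnd still allows beyond the
 * packets already in flight, further capped by the TSO autosizing
 * limit.  A budget of zero ends the loop.
 */
static int retrans_seg_budget(int snd_cwnd, int packets_in_flight,
			      int max_segs)
{
	int segs = snd_cwnd - packets_in_flight;

	if (segs <= 0)
		return 0;	/* cwnd exhausted: stop retransmitting */
	return segs < max_segs ? segs : max_segs;
}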
29901da177e4SLinus Torvalds 
2991d83769a5SEric Dumazet /* We allow to exceed memory limits for FIN packets to expedite
2992d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
2993845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
2994845704a5SEric Dumazet  * or even be forced to close the flow without any FIN.
2995a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
2996a6c5ea4cSEric Dumazet  * with edge trigger epoll()
2997d83769a5SEric Dumazet  */
2998a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
2999d83769a5SEric Dumazet {
3000e805605cSJohannes Weiner 	int amt;
3001d83769a5SEric Dumazet 
3002d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3003d83769a5SEric Dumazet 		return;
3004d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3005d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3006e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3007e805605cSJohannes Weiner 
3008baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3009baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3010d83769a5SEric Dumazet }
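
/*
 * Illustrative sketch (not kernel code): the page rounding behind
 * sk_mem_pages() above.  SK_MEM_QUANTUM is PAGE_SIZE; 4096 here is an
 * assumption for illustration.
 */
static int forced_pages_to_charge(int size, int forward_alloc)
{
	const int quantum = 4096;	/* assumed SK_MEM_QUANTUM */

	if (size <= forward_alloc)
		return 0;		/* already covered */
	return (size + quantum - 1) / quantum;
}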
3011d83769a5SEric Dumazet 
3012845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3013845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
30141da177e4SLinus Torvalds  */
30151da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30161da177e4SLinus Torvalds {
3017845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30181da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30191da177e4SLinus Torvalds 
3020845704a5SEric Dumazet 	/* Optimization, tack on the FIN if we have one skb in write queue and
3021845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
3022845704a5SEric Dumazet 	 * Note: in the latter case, FIN packet will be sent after a timeout,
3023845704a5SEric Dumazet 	 * as TCP stack thinks it has already been transmitted.
30241da177e4SLinus Torvalds 	 */
3025*75c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
3026*75c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
3027*75c119afSEric Dumazet 
3028*75c119afSEric Dumazet 	if (tskb) {
3029845704a5SEric Dumazet coalesce:
3030845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3031845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
30321da177e4SLinus Torvalds 		tp->write_seq++;
3033*75c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3034845704a5SEric Dumazet 			/* This means tskb was already sent.
3035845704a5SEric Dumazet 			 * Pretend we included the FIN on the previous transmit.
3036845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3037845704a5SEric Dumazet 			 * if the FIN had been sent.  This is because the
3038845704a5SEric Dumazet 			 * retransmit path does not change tp->snd_nxt.
3039845704a5SEric Dumazet 			 */
3040845704a5SEric Dumazet 			tp->snd_nxt++;
3041845704a5SEric Dumazet 			return;
3042845704a5SEric Dumazet 		}
30431da177e4SLinus Torvalds 	} else {
3044845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3045845704a5SEric Dumazet 		if (unlikely(!skb)) {
3046845704a5SEric Dumazet 			if (tskb)
3047845704a5SEric Dumazet 				goto coalesce;
3048845704a5SEric Dumazet 			return;
30491da177e4SLinus Torvalds 		}
3050e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3051d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3052a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
30531da177e4SLinus Torvalds 		/* FIN eats a sequence byte; write_seq is advanced by tcp_queue_skb(). */
3054e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
3055a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
30561da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
30571da177e4SLinus Torvalds 	}
3058845704a5SEric Dumazet 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
30591da177e4SLinus Torvalds }
30601da177e4SLinus Torvalds 
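/* Sequence accounting sketch (editor's illustration with hypothetical
 * numbers): the FIN flag consumes one unit of sequence space.  With
 * write_seq == 1001 and an unsent tail skb covering [901, 1001),
 * coalescing sets TCPHDR_FIN and advances end_seq and write_seq to 1002.
 * If the tail skb had already been sent (rtx queue case), snd_nxt is
 * bumped to 1002 as well, so the retransmit path carries the FIN.
 */
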
30611da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
30621da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
30631da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
306465bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
30651da177e4SLinus Torvalds  */
3066dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
30671da177e4SLinus Torvalds {
30681da177e4SLinus Torvalds 	struct sk_buff *skb;
30691da177e4SLinus Torvalds 
30707cc2b043SGao Feng 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
30717cc2b043SGao Feng 
30721da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
30731da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
30741da177e4SLinus Torvalds 	if (!skb) {
30754e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
30761da177e4SLinus Torvalds 		return;
30771da177e4SLinus Torvalds 	}
30781da177e4SLinus Torvalds 
30791da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
30801da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
3081e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3082a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
30839a568de4SEric Dumazet 	tcp_mstamp_refresh(tcp_sk(sk));
30841da177e4SLinus Torvalds 	/* Send it off. */
3085dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
30864e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
30871da177e4SLinus Torvalds }
30881da177e4SLinus Torvalds 
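/* Caller sketch (editor's addition, hedged): tcp_close() is one such
 * caller, when the receive queue still holds unread data:
 */
#if 0
	if (data_was_unread) {
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	}
#endif
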
308967edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
309067edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
30911da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
30921da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
30931da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
30941da177e4SLinus Torvalds  */
30951da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
30961da177e4SLinus Torvalds {
30971da177e4SLinus Torvalds 	struct sk_buff *skb;
30981da177e4SLinus Torvalds 
3099*75c119afSEric Dumazet 	skb = tcp_rtx_queue_head(sk);
310051456b29SIan Morris 	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3101*75c119afSEric Dumazet 		pr_err("%s: wrong queue state\n", __func__);
31021da177e4SLinus Torvalds 		return -EFAULT;
31031da177e4SLinus Torvalds 	}
31044de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
31051da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
3106e2080072SEric Dumazet 			struct sk_buff *nskb;
3107e2080072SEric Dumazet 
3108e2080072SEric Dumazet 			tcp_skb_tsorted_save(skb) {
3109e2080072SEric Dumazet 				nskb = skb_copy(skb, GFP_ATOMIC);
3110e2080072SEric Dumazet 			} tcp_skb_tsorted_restore(skb);
311151456b29SIan Morris 			if (!nskb)
31121da177e4SLinus Torvalds 				return -ENOMEM;
3113e2080072SEric Dumazet 			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3114*75c119afSEric Dumazet 			tcp_rtx_queue_unlink_and_free(skb, sk);
3115f4a775d1SEric Dumazet 			__skb_header_release(nskb);
3116*75c119afSEric Dumazet 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
31173ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
31183ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
31191da177e4SLinus Torvalds 			skb = nskb;
31201da177e4SLinus Torvalds 		}
31211da177e4SLinus Torvalds 
31224de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3123735d3831SFlorian Westphal 		tcp_ecn_send_synack(sk, skb);
31241da177e4SLinus Torvalds 	}
3125dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
31261da177e4SLinus Torvalds }
31271da177e4SLinus Torvalds 
31284aea39c1SEric Dumazet /**
31294aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
31304aea39c1SEric Dumazet  * @sk: listener socket
31314aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
31324aea39c1SEric Dumazet  * @req: request_sock pointer
31334aea39c1SEric Dumazet  *
31344aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
31354aea39c1SEric Dumazet  * @dst is consumed: the caller should not use it again.
31364aea39c1SEric Dumazet  */
31375d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3138e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
3139ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
3140b3d05147SEric Dumazet 				enum tcp_synack_type synack_type)
31411da177e4SLinus Torvalds {
31422e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
31435d062de7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
314480f03e27SEric Dumazet 	struct tcp_md5sig_key *md5 = NULL;
31455d062de7SEric Dumazet 	struct tcp_out_options opts;
31465d062de7SEric Dumazet 	struct sk_buff *skb;
3147bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
31485d062de7SEric Dumazet 	struct tcphdr *th;
3149f5fff5dcSTom Quetchenbach 	int mss;
31501da177e4SLinus Torvalds 
3151ca6fb065SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
31524aea39c1SEric Dumazet 	if (unlikely(!skb)) {
31534aea39c1SEric Dumazet 		dst_release(dst);
31541da177e4SLinus Torvalds 		return NULL;
31554aea39c1SEric Dumazet 	}
31561da177e4SLinus Torvalds 	/* Reserve space for headers. */
31571da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
31581da177e4SLinus Torvalds 
3159b3d05147SEric Dumazet 	switch (synack_type) {
3160b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
31619e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3162b3d05147SEric Dumazet 		break;
3163b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3164b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3165b3d05147SEric Dumazet 		 * to avoid false sharing.
3166b3d05147SEric Dumazet 		 */
3167b3d05147SEric Dumazet 		break;
3168b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3169ca6fb065SEric Dumazet 		/* sk is a const pointer, because we want to express that
3170ca6fb065SEric Dumazet 		 * multiple CPUs might call us concurrently.
3171ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote sk to rw.
3172ca6fb065SEric Dumazet 		 */
3173ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3174b3d05147SEric Dumazet 		break;
3175ca6fb065SEric Dumazet 	}
31764aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
31771da177e4SLinus Torvalds 
31783541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3179f5fff5dcSTom Quetchenbach 
318033ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
31818b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
31828b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
31839a568de4SEric Dumazet 		skb->skb_mstamp = cookie_init_timestamp(req);
31848b5f12d0SFlorian Westphal 	else
31858b5f12d0SFlorian Westphal #endif
31869a568de4SEric Dumazet 		skb->skb_mstamp = tcp_clock_us();
318780f03e27SEric Dumazet 
318880f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
318980f03e27SEric Dumazet 	rcu_read_lock();
3190fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
319180f03e27SEric Dumazet #endif
319258d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
319337bfbddaSEric Dumazet 	tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
319437bfbddaSEric Dumazet 			  sizeof(*th);
319533ad798cSAdam Langley 
3196aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3197aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
31981da177e4SLinus Torvalds 
3199ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
32001da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
32011da177e4SLinus Torvalds 	th->syn = 1;
32021da177e4SLinus Torvalds 	th->ack = 1;
32036ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3204b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3205634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3206e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
3207e870a8efSIlpo Järvinen 	/* Setting the flags is superfluous here for callers (and ECE is
3208e870a8efSIlpo Järvinen 	 * not even correctly set).
3209e870a8efSIlpo Järvinen 	 */
3210e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
3211a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
32124957faadSWilliam Allen Simpson 
32131da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
32148336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
32158336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32161da177e4SLinus Torvalds 
32171da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3218ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
32195d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
32201da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
322190bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3222cfb6eeb4SYOSHIFUJI Hideaki 
3223cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3224cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
322580f03e27SEric Dumazet 	if (md5)
3226bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
322739f8e58eSEric Dumazet 					       md5, req_to_sk(req), skb);
322880f03e27SEric Dumazet 	rcu_read_unlock();
3229cfb6eeb4SYOSHIFUJI Hideaki #endif
3230cfb6eeb4SYOSHIFUJI Hideaki 
3231b50edd78SEric Dumazet 	/* Do not fool tcpdump (if any), clean our debris */
32322456e855SThomas Gleixner 	skb->tstamp = 0;
32331da177e4SLinus Torvalds 	return skb;
32341da177e4SLinus Torvalds }
32354bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
32361da177e4SLinus Torvalds 
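/* Caller sketch (editor's illustration): @dst is consumed on both the
 * success and failure paths, so a caller must not release it again.
 */
#if 0
	skb = tcp_make_synack(sk, dst, req, foc, TCP_SYNACK_NORMAL);
	if (!skb)
		return -ENOMEM;	/* dst was already released */
	/* ... fill in addresses and hand skb to the IP layer ... */
#endif
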
323781164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
323881164413SDaniel Borkmann {
323981164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
324081164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
324181164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
324281164413SDaniel Borkmann 
324381164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
324481164413SDaniel Borkmann 		return;
324581164413SDaniel Borkmann 
324681164413SDaniel Borkmann 	rcu_read_lock();
324781164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
324881164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
324981164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
325081164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
325181164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
325281164413SDaniel Borkmann 	}
325381164413SDaniel Borkmann 	rcu_read_unlock();
325481164413SDaniel Borkmann }
325581164413SDaniel Borkmann 
325667edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
3257f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
32581da177e4SLinus Torvalds {
3259cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
32601da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32611da177e4SLinus Torvalds 	__u8 rcv_wscale;
326213d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
32631da177e4SLinus Torvalds 
32641da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
32651da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
32661da177e4SLinus Torvalds 	 */
32675d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
32685d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
32695d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
32701da177e4SLinus Torvalds 
3271cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
327200db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3273cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3274cfb6eeb4SYOSHIFUJI Hideaki #endif
3275cfb6eeb4SYOSHIFUJI Hideaki 
32761da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it so we can clamp the MSS */
32771da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
32781da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
32791da177e4SLinus Torvalds 	tp->max_window = 0;
32805d424d5aSJohn Heffner 	tcp_mtup_init(sk);
32811da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
32821da177e4SLinus Torvalds 
328381164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
328481164413SDaniel Borkmann 
32851da177e4SLinus Torvalds 	if (!tp->window_clamp)
32861da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
32873541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3288f5fff5dcSTom Quetchenbach 
32891da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
32901da177e4SLinus Torvalds 
3291e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3292e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3293e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3294e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3295e88c64f0SHagen Paul Pfeifer 
329613d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
329713d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
329813d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
329913d3b1ebSLawrence Brakmo 
33001da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
33011da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
33021da177e4SLinus Torvalds 				  &tp->rcv_wnd,
33031da177e4SLinus Torvalds 				  &tp->window_clamp,
33049bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
330531d12926Slaurent chavey 				  &rcv_wscale,
330613d3b1ebSLawrence Brakmo 				  rcv_wnd);
33071da177e4SLinus Torvalds 
33081da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
33091da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
33101da177e4SLinus Torvalds 
33111da177e4SLinus Torvalds 	sk->sk_err = 0;
33121da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
33131da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3314ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
33151da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
33161da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
331733f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3318370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3319ee995283SPavel Emelyanov 
3320ee995283SPavel Emelyanov 	if (likely(!tp->repair))
33211da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3322c7781a6eSAndrew Vagin 	else
332370eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3324ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
3325ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
33261da177e4SLinus Torvalds 
33278550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3328463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
33291da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
33301da177e4SLinus Torvalds }
33311da177e4SLinus Torvalds 
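/* Worked example for the initial window selection above (editor's
 * sketch, hypothetical buffer size): the window-scale loop in
 * tcp_select_initial_window() halves the advertised space until it
 * fits the 16-bit window field; a space of 180224 bytes, for example,
 * yields rcv_wscale == 2, since 180224 >> 2 == 45056 <= 65535 while
 * 180224 >> 1 == 90112 still does not fit.
 */
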
3332783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3333783237e8SYuchung Cheng {
3334783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3335783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3336783237e8SYuchung Cheng 
3337783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3338f4a775d1SEric Dumazet 	__skb_header_release(skb);
3339783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3340783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3341783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
3342783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3343783237e8SYuchung Cheng }
3344783237e8SYuchung Cheng 
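/* Accounting note (editor's addition): tcp_init_nondata_skb() already
 * set end_seq = seq + 1 because the SYN flag consumes one unit of
 * sequence space; the helper above then adds any Fast Open payload
 * length, charges the skb against the socket, and publishes the result
 * in tp->write_seq and tp->packets_out.
 */
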
3345783237e8SYuchung Cheng /* Build and send a SYN with data and a (cached) Fast Open cookie. However,
3346783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, so that regular SYNs
3347783237e8SYuchung Cheng  * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
3348783237e8SYuchung Cheng  * only the SYN sequence, the data is retransmitted in the first ACK.
3349783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending
3350783237e8SYuchung Cheng  * a regular SYN with the Fast Open cookie request option.
3351783237e8SYuchung Cheng  */
3352783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3353783237e8SYuchung Cheng {
3354783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3355783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3356065263f4SWei Wang 	int space, err = 0;
3357355a901eSEric Dumazet 	struct sk_buff *syn_data;
3358783237e8SYuchung Cheng 
335967da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3360065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3361783237e8SYuchung Cheng 		goto fallback;
3362783237e8SYuchung Cheng 
3363783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3364783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3365783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3366783237e8SYuchung Cheng 	 */
33673541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
33683541f9e8SEric Dumazet 
33691b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3370783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3371783237e8SYuchung Cheng 
3372f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3373f5ddcbbbSEric Dumazet 
3374f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3375f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3376f5ddcbbbSEric Dumazet 
3377eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3378355a901eSEric Dumazet 	if (!syn_data)
3379783237e8SYuchung Cheng 		goto fallback;
3380355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3381355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
338207e100f9SEric Dumazet 	if (space) {
338307e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
338457be5bdaSAl Viro 					    &fo->data->msg_iter);
338557be5bdaSAl Viro 		if (unlikely(!copied)) {
3386355a901eSEric Dumazet 			kfree_skb(syn_data);
3387783237e8SYuchung Cheng 			goto fallback;
3388783237e8SYuchung Cheng 		}
338957be5bdaSAl Viro 		if (copied != space) {
339057be5bdaSAl Viro 			skb_trim(syn_data, copied);
339157be5bdaSAl Viro 			space = copied;
339257be5bdaSAl Viro 		}
339307e100f9SEric Dumazet 	}
3394355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3395355a901eSEric Dumazet 	if (space == fo->size)
3396355a901eSEric Dumazet 		fo->data = NULL;
3397355a901eSEric Dumazet 	fo->copied = space;
3398783237e8SYuchung Cheng 
3399355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
34000f87230dSFrancis Yan 	if (syn_data->len)
34010f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3402355a901eSEric Dumazet 
3403355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3404355a901eSEric Dumazet 
3405355a901eSEric Dumazet 	syn->skb_mstamp = syn_data->skb_mstamp;
3406355a901eSEric Dumazet 
3407355a901eSEric Dumazet 	/* Now that the full SYN+DATA has been cloned and sent (or not),
3408355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data)
3409355a901eSEric Dumazet 	 * we keep queued in case of a retransmit, as we
3410355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
3411431a9124SEric Dumazet 	 */
3412355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3413355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3414355a901eSEric Dumazet 	if (!err) {
341567da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
3416*75c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3417f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3418783237e8SYuchung Cheng 		goto done;
3419783237e8SYuchung Cheng 	}
3420783237e8SYuchung Cheng 
3421*75c119afSEric Dumazet 	/* data was not sent; put it in the write_queue */
3422*75c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3423b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3424b5b7db8dSEric Dumazet 
3425783237e8SYuchung Cheng fallback:
3426783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3427783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3428783237e8SYuchung Cheng 		fo->cookie.len = 0;
3429783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3430783237e8SYuchung Cheng 	if (err)
3431783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3432783237e8SYuchung Cheng done:
3433783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3434783237e8SYuchung Cheng 	return err;
3435783237e8SYuchung Cheng }
3436783237e8SYuchung Cheng 
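/* Worked example for the space computation above (editor's sketch,
 * assuming a typical Ethernet path MSS of 1460 bytes):
 * MAX_TCP_OPTION_SPACE is 40 bytes, so roughly 1460 - 40 == 1420 bytes
 * of user data can ride on the SYN, further clamped to fo->size and to
 * what an order-0 skb allocation can hold.
 */
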
343767edfef7SAndi Kleen /* Build a SYN and send it off. */
34381da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
34391da177e4SLinus Torvalds {
34401da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
34411da177e4SLinus Torvalds 	struct sk_buff *buff;
3442ee586811SEric Paris 	int err;
34431da177e4SLinus Torvalds 
34449872a4bdSLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
34458ba60924SEric Dumazet 
34468ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
34478ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
34488ba60924SEric Dumazet 
34491da177e4SLinus Torvalds 	tcp_connect_init(sk);
34501da177e4SLinus Torvalds 
34512b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
34522b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
34532b916477SAndrey Vagin 		return 0;
34542b916477SAndrey Vagin 	}
34552b916477SAndrey Vagin 
3456eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3457355a901eSEric Dumazet 	if (unlikely(!buff))
34581da177e4SLinus Torvalds 		return -ENOBUFS;
34591da177e4SLinus Torvalds 
3460a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
34619a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
34629a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3463783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3464735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
3465*75c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
34661da177e4SLinus Torvalds 
3467783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3468783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3469783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3470ee586811SEric Paris 	if (err == -ECONNREFUSED)
3471ee586811SEric Paris 		return err;
3472bd37a088SWei Yongjun 
3473bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3474bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3475bd37a088SWei Yongjun 	 */
3476bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3477bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3478b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3479b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3480b5b7db8dSEric Dumazet 		tp->snd_nxt	= TCP_SKB_CB(buff)->seq;
3481b5b7db8dSEric Dumazet 		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3482b5b7db8dSEric Dumazet 	}
348381cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
34841da177e4SLinus Torvalds 
34851da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
34863f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
34873f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
34881da177e4SLinus Torvalds 	return 0;
34891da177e4SLinus Torvalds }
34904bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
34911da177e4SLinus Torvalds 
34921da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
34931da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
34941da177e4SLinus Torvalds  * for details.
34951da177e4SLinus Torvalds  */
34961da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
34971da177e4SLinus Torvalds {
3498463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3499463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
35001da177e4SLinus Torvalds 	unsigned long timeout;
35011da177e4SLinus Torvalds 
35029890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
35039890092eSFlorian Westphal 
35041da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3505463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
35061da177e4SLinus Torvalds 		int max_ato = HZ / 2;
35071da177e4SLinus Torvalds 
3508056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3509056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
35101da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
35111da177e4SLinus Torvalds 
35121da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
35131da177e4SLinus Torvalds 
35141da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound the delayed ack.
3515463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of rtt
35161da177e4SLinus Torvalds 		 * measurements directly.
35171da177e4SLinus Torvalds 		 */
3518740b0f18SEric Dumazet 		if (tp->srtt_us) {
3519740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3520740b0f18SEric Dumazet 					TCP_DELACK_MIN);
35211da177e4SLinus Torvalds 
35221da177e4SLinus Torvalds 			if (rtt < max_ato)
35231da177e4SLinus Torvalds 				max_ato = rtt;
35241da177e4SLinus Torvalds 		}
35251da177e4SLinus Torvalds 
35261da177e4SLinus Torvalds 		ato = min(ato, max_ato);
35271da177e4SLinus Torvalds 	}
35281da177e4SLinus Torvalds 
35291da177e4SLinus Torvalds 	/* Stay within the limit we were given */
35301da177e4SLinus Torvalds 	timeout = jiffies + ato;
35311da177e4SLinus Torvalds 
35321da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already. */
3533463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
35341da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
35351da177e4SLinus Torvalds 		 * send ACK now.
35361da177e4SLinus Torvalds 		 */
3537463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3538463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
35391da177e4SLinus Torvalds 			tcp_send_ack(sk);
35401da177e4SLinus Torvalds 			return;
35411da177e4SLinus Torvalds 		}
35421da177e4SLinus Torvalds 
3543463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3544463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
35451da177e4SLinus Torvalds 	}
3546463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3547463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3548463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
35491da177e4SLinus Torvalds }
35501da177e4SLinus Torvalds 
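/* Worked example (editor's sketch, assuming HZ == 1000 and a 20 ms
 * smoothed RTT): usecs_to_jiffies(tp->srtt_us >> 3) == 20, which
 * max_t() raises to TCP_DELACK_MIN (40 ms at HZ == 1000), so max_ato
 * becomes 40 and ato is clamped to at most 40 ms before the delack
 * timer is armed.
 */
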
35511da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
35521da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
35531da177e4SLinus Torvalds {
35541da177e4SLinus Torvalds 	struct sk_buff *buff;
35551da177e4SLinus Torvalds 
3556058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3557058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3558058dc334SIlpo Järvinen 		return;
3559058dc334SIlpo Järvinen 
35609890092eSFlorian Westphal 	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
35619890092eSFlorian Westphal 
35621da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
35631da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
35641da177e4SLinus Torvalds 	 * sock.
35651da177e4SLinus Torvalds 	 */
35667450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
35677450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
35687450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3569463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3570463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
35713f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
35723f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
35731da177e4SLinus Torvalds 		return;
35741da177e4SLinus Torvalds 	}
35751da177e4SLinus Torvalds 
35761da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
35771da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3578a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
35791da177e4SLinus Torvalds 
358098781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
358198781965SEric Dumazet 	 * too much.
358298781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
358398781965SEric Dumazet 	 */
358498781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
358598781965SEric Dumazet 
35861da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
35877450aaf6SEric Dumazet 	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
35881da177e4SLinus Torvalds }
3589e3118e83SDaniel Borkmann EXPORT_SYMBOL_GPL(tcp_send_ack);
35901da177e4SLinus Torvalds 
35911da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
35921da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
35931da177e4SLinus Torvalds  *
35941da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
35951da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
35961da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
35971da177e4SLinus Torvalds  *
35981da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
35991da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
36001da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe the window.
36011da177e4SLinus Torvalds  */
3602e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
36031da177e4SLinus Torvalds {
36041da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36051da177e4SLinus Torvalds 	struct sk_buff *skb;
36061da177e4SLinus Torvalds 
36071da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
36087450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
36097450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
361051456b29SIan Morris 	if (!skb)
36111da177e4SLinus Torvalds 		return -1;
36121da177e4SLinus Torvalds 
36131da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
36141da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
36151da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
36161da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
36171da177e4SLinus Torvalds 	 * send it.
36181da177e4SLinus Torvalds 	 */
3619a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3620e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
36217450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
36221da177e4SLinus Torvalds }
36231da177e4SLinus Torvalds 
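/* Why SND.UNA - 1 works (editor's note): the segment sits below the
 * receiver's expected sequence, so it is discarded as a duplicate, but
 * RFC 793 obliges the receiver to reply with an ACK carrying its
 * current window, which is exactly what the prober needs to learn.
 */
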
3624385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3625ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3626ee995283SPavel Emelyanov {
3627ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3628ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
36299a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3630e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3631ee995283SPavel Emelyanov 	}
3632ee995283SPavel Emelyanov }
3633ee995283SPavel Emelyanov 
363467edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
3635e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
36361da177e4SLinus Torvalds {
36371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
36381da177e4SLinus Torvalds 	struct sk_buff *skb;
36391da177e4SLinus Torvalds 
3640058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3641058dc334SIlpo Järvinen 		return -1;
3642058dc334SIlpo Järvinen 
364300db4124SIan Morris 	skb = tcp_send_head(sk);
364400db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
36451da177e4SLinus Torvalds 		int err;
36460c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
364790840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
36481da177e4SLinus Torvalds 
36491da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
36501da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
36511da177e4SLinus Torvalds 
36521da177e4SLinus Torvalds 		/* We are probing the opening of a window
36531da177e4SLinus Torvalds 		 * but the window size is != 0;
36541da177e4SLinus Torvalds 		 * this must have been a result of SWS avoidance (sender side).
36551da177e4SLinus Torvalds 		 */
36561da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
36571da177e4SLinus Torvalds 		    skb->len > mss) {
36581da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
36594de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3660*75c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
3661*75c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
36621da177e4SLinus Torvalds 				return -1;
36631da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
36645bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
36651da177e4SLinus Torvalds 
36664de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3667dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
366866f5fe62SIlpo Järvinen 		if (!err)
366966f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
36701da177e4SLinus Torvalds 		return err;
36711da177e4SLinus Torvalds 	} else {
367233f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3673e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3674e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
36751da177e4SLinus Torvalds 	}
36761da177e4SLinus Torvalds }
36771da177e4SLinus Torvalds 
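/* Probe sizing note (editor's addition): if only part of the head skb
 * fits the peer's window, it is split with tcp_fragment() so that a
 * seg_size-byte probe can go out; the PSH flag is set so the receiver
 * pushes whatever arrives up to the application promptly.
 */
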
36781da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
36791da177e4SLinus Torvalds  * send a partial packet, else send a zero-window probe.
36801da177e4SLinus Torvalds  */
36811da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
36821da177e4SLinus Torvalds {
3683463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
36841da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3685c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3686fcdd1cf4SEric Dumazet 	unsigned long probe_max;
36871da177e4SLinus Torvalds 	int err;
36881da177e4SLinus Torvalds 
3689e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
36901da177e4SLinus Torvalds 
3691*75c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
36921da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
36936687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3694463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
36951da177e4SLinus Torvalds 		return;
36961da177e4SLinus Torvalds 	}
36971da177e4SLinus Torvalds 
36981da177e4SLinus Torvalds 	if (err <= 0) {
3699c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3700463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
37016687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3702fcdd1cf4SEric Dumazet 		probe_max = TCP_RTO_MAX;
37031da177e4SLinus Torvalds 	} else {
37041da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
37056687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
37061da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
37071da177e4SLinus Torvalds 		 *
37081da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
37091da177e4SLinus Torvalds 		 */
37106687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
37116687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3712fcdd1cf4SEric Dumazet 		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
37131da177e4SLinus Torvalds 	}
3714fcdd1cf4SEric Dumazet 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
371521c8fe99SEric Dumazet 				  tcp_probe0_when(sk, probe_max),
3716fcdd1cf4SEric Dumazet 				  TCP_RTO_MAX);
37171da177e4SLinus Torvalds }
37185db92c99SOctavian Purdila 
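/* Timing sketch (editor's illustration, hypothetical RTO): with
 * icsk_rto == 200 ms, successive failed probes arm the timer at
 * roughly 200, 400, 800, ... ms, as tcp_probe0_when() shifts the base
 * timeout left by icsk_backoff, capped at TCP_RTO_MAX.  When the probe
 * failed only due to local congestion, the backoff is not increased
 * and the wait is capped at TCP_RESOURCE_PROBE_INTERVAL instead.
 */
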
3719ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
37205db92c99SOctavian Purdila {
37215db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
37225db92c99SOctavian Purdila 	struct flowi fl;
37235db92c99SOctavian Purdila 	int res;
37245db92c99SOctavian Purdila 
372558d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3726b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
37275db92c99SOctavian Purdila 	if (!res) {
372890bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
372902a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
37307e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
37317e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
37325db92c99SOctavian Purdila 	}
37335db92c99SOctavian Purdila 	return res;
37345db92c99SOctavian Purdila }
37355db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3736