xref: /linux/net/ipv4/tcp_output.c (revision 4fab9071950c2021d846e18351e0f46a1cffd67b)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if the window was not shrunk.
 * If the window has been shrunk, what should we send? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND can also already
 * be invalid. OK, let's settle on this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    a large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first-hop
 *    device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
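
/* A worked example of the halving loop above (illustrative numbers, not
 * taken from this file): with cwnd = 32, restart_cwnd = 10, an icsk_rto
 * of 200 ms and an idle delta of 1000 ms, the loop halves twice
 * (32 -> 16 -> 8) and then stops because cwnd is no longer above
 * restart_cwnd; the final max() clamps snd_cwnd back up to 10.
 */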

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If this is a reply within ATO of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* The initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
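
/* Worked example (TCP_INIT_CWND is 10 in this tree, so init_rwnd starts
 * at 20 segments): for mss <= 1460 the result is simply 20 segments; for
 * an illustrative jumbo mss of 8960, (1460 * 20) / 8960 = 3, so the
 * initial receive window is limited to 3 segments (never below the
 * floor of 2).
 */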
20685f16525SYuchung Cheng 
2071da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
2081da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
2091da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
2101da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
2111da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
2121da177e4SLinus Torvalds  * This MUST be enforced by all callers.
2131da177e4SLinus Torvalds  */
2141da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
2151da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
21631d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
21731d12926Slaurent chavey 			       __u32 init_rcv_wnd)
2181da177e4SLinus Torvalds {
2191da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
2201da177e4SLinus Torvalds 
2211da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
2221da177e4SLinus Torvalds 	if (*window_clamp == 0)
2231da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
2241da177e4SLinus Torvalds 	space = min(*window_clamp, space);
2251da177e4SLinus Torvalds 
2261da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
2271da177e4SLinus Torvalds 	if (space > mss)
2281da177e4SLinus Torvalds 		space = (space / mss) * mss;
2291da177e4SLinus Torvalds 
2301da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
23115d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
23215d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
23315d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
23415d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
23515d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
23615d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2371da177e4SLinus Torvalds 	 */
23815d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
2391da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
24015d99e02SRick Jones 	else
24115d99e02SRick Jones 		(*rcv_wnd) = space;
24215d99e02SRick Jones 
2431da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2441da177e4SLinus Torvalds 	if (wscale_ok) {
2451da177e4SLinus Torvalds 		/* Set window scaling on max possible window
2461da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2471da177e4SLinus Torvalds 		 */
2481da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
249316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2501da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2511da177e4SLinus Torvalds 			space >>= 1;
2521da177e4SLinus Torvalds 			(*rcv_wscale)++;
2531da177e4SLinus Torvalds 		}
2541da177e4SLinus Torvalds 	}
2551da177e4SLinus Torvalds 
2561da177e4SLinus Torvalds 	if (mss > (1 << *rcv_wscale)) {
25785f16525SYuchung Cheng 		if (!init_rcv_wnd) /* Use default unless specified otherwise */
25885f16525SYuchung Cheng 			init_rcv_wnd = tcp_default_init_rwnd(mss);
259b1afde60SNandita Dukkipati 		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
2601da177e4SLinus Torvalds 	}
2611da177e4SLinus Torvalds 
2621da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2631da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2641da177e4SLinus Torvalds }
2654bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_select_initial_window);
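
/* Worked example of the scaling loop above: with 1 MB of receive space
 * (space = 1048576), the loop shifts five times (1048576 -> 524288 ->
 * 262144 -> 131072 -> 65536 -> 32768), so rcv_wscale ends up as 5 and
 * the peer must left-shift our advertised 16-bit window by 5 bits to
 * recover the real window.
 */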

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
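
/* Illustration of the never-shrink rule above (example numbers): suppose
 * rcv_wscale = 7 (window granularity of 128 bytes), cur_win = 10000 and
 * the freshly computed new_win is smaller.  ALIGN(10000, 128) rounds up
 * to 10112, the smallest multiple of the scale granularity that still
 * covers the previously advertised window, so the peer never sees the
 * window move backwards.
 */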

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN.  */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs the common control bits of a non-data skb. If SYN/FIN is
 * present, auto-increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	shinfo->gso_segs = 1;
	shinfo->gso_size = 0;
	shinfo->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
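
/* Example of the SYN/FIN accounting above: a bare SYN built with
 * tcp_init_nondata_skb(skb, 100, TCPHDR_SYN) gets seq = 100 and
 * end_seq = 101, because SYN (like FIN) consumes one unit of sequence
 * space even though it carries no payload.
 */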

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
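
/* How one of the htonl() words above lands on the wire, using the MSS
 * option as an example: TCPOPT_MSS is 2 and TCPOLEN_MSS is 4, so for
 * mss = 1460 (0x05b4) the word (2 << 24) | (4 << 16) | 1460 becomes the
 * byte sequence 02 04 05 b4 after htonl(), i.e. option kind, option
 * length, then the 16-bit value in network order.
 */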

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be included in
	 * the advertised MSS.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
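
/* Option space accounting for a typical SYN, as computed above:
 * MAX_TCP_OPTION_SPACE is 40 bytes; MSS takes 4 (TCPOLEN_MSS_ALIGNED),
 * timestamps take 12 (TCPOLEN_TSTAMP_ALIGNED, with SACK_PERM folded into
 * the same word) and window scaling takes 4 (TCPOLEN_WSCALE_ALIGNED),
 * leaving 20 bytes, which is enough for an aligned Fast Open cookie of
 * up to 16 bytes plus its 4-byte experimental-option header.
 */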

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned int mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
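
/* Worked example of the SACK sizing above: with timestamps enabled,
 * size starts at 12, so remaining = 40 - 12 = 28 bytes.  Each SACK block
 * costs TCPOLEN_SACK_PERBLOCK (8) bytes on top of the 4-byte aligned
 * base, so (28 - 4) / 8 = 3 blocks fit, for a total option size of
 * 12 + 4 + 3 * 8 = 40 bytes, exactly MAX_TCP_OPTION_SPACE.
 */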

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from an skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);
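
/* The cmpxchg() loop above is the classic lock-free "claim all pending
 * bits" pattern.  A minimal generic sketch of the same idea (purely
 * illustrative, not a kernel API):
 *
 *	do {
 *		old = *word;			// snapshot the flag word
 *		if (!(old & MASK))
 *			return;			// nothing to claim
 *		new = old & ~MASK;		// clear the claimed bits
 *	} while (cmpxchg(word, old, new) != old);
 *	// here (old & MASK) holds the bits this context now owns
 *
 * If another context sets a bit between the snapshot and the cmpxchg(),
 * the compare fails and the loop retries with a fresh snapshot, so no
 * deferred work is ever lost.
 */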

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on the socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to the tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	if (clone_it) {
		skb_mstamp_get(&skb->skb_mstamp);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
		/* Our usage of tstamp should remain private */
		skb->tstamp.tv64 = 0;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in the qdisc/device queue, then allow XPS to
	 * select another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build the TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	return net_xmit_eval(err);
}
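
/* Decoding the raw 16-bit store above: the seventh __be16 of the TCP
 * header holds the 4-bit data offset followed by the reserved and flag
 * bits.  For example, with a 32-byte header (tcp_header_size >> 2 == 8)
 * and only ACK set (tcp_flags == 0x10), the value written is
 * (8 << 12) | 0x10 == 0x8010, i.e. doff = 8 and the ACK bit on the wire.
 */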
9861da177e4SLinus Torvalds 
98767edfef7SAndi Kleen /* This routine just queues the buffer for sending.
9881da177e4SLinus Torvalds  *
9891da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
9901da177e4SLinus Torvalds  * otherwise socket can stall.
9911da177e4SLinus Torvalds  */
9921da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
9931da177e4SLinus Torvalds {
9941da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9951da177e4SLinus Torvalds 
9961da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
9971da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
9981da177e4SLinus Torvalds 	skb_header_release(skb);
999fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
10003ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
10013ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
10021da177e4SLinus Torvalds }
10031da177e4SLinus Torvalds 
100467edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
1005cf533ea5SEric Dumazet static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
1006056834d9SIlpo Järvinen 				 unsigned int mss_now)
1007f6302d1dSDavid S. Miller {
10087b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo = skb_shinfo(skb);
10097b7fc97aSEric Dumazet 
1010c52e2421SEric Dumazet 	/* Make sure we own this skb before messing gso_size/gso_segs */
1011c52e2421SEric Dumazet 	WARN_ON_ONCE(skb_cloned(skb));
1012c52e2421SEric Dumazet 
10138f26fb1cSEric Dumazet 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1014f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
1015f6302d1dSDavid S. Miller 		 * non-TSO case.
1016f6302d1dSDavid S. Miller 		 */
10177b7fc97aSEric Dumazet 		shinfo->gso_segs = 1;
10187b7fc97aSEric Dumazet 		shinfo->gso_size = 0;
10197b7fc97aSEric Dumazet 		shinfo->gso_type = 0;
1020f6302d1dSDavid S. Miller 	} else {
10217b7fc97aSEric Dumazet 		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
10227b7fc97aSEric Dumazet 		shinfo->gso_size = mss_now;
10237b7fc97aSEric Dumazet 		shinfo->gso_type = sk->sk_gso_type;
10241da177e4SLinus Torvalds 	}
10251da177e4SLinus Torvalds }
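
/* Worked example (illustrative, not part of the original file): with
 * skb->len = 4000 and mss_now = 1460, the skb exceeds one MSS, so
 * gso_segs = DIV_ROUND_UP(4000, 1460) = 3 (two full 1460-byte segments
 * plus a 1080-byte tail) and gso_size = 1460.  A minimal sketch of the
 * same rounding on explicit arguments:
 */
static inline unsigned int tcp_example_tso_segs(unsigned int len,
						unsigned int mss_now)
{
	/* Identical round-up division to the gso_segs assignment above. */
	return DIV_ROUND_UP(len, mss_now);
}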
10261da177e4SLinus Torvalds 
102791fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
102868f8353bSIlpo Järvinen  * whether skb is counted in fackets_out or not.
102991fed7a1SIlpo Järvinen  */
1030cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
103191fed7a1SIlpo Järvinen 				   int decr)
103291fed7a1SIlpo Järvinen {
1033a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1034a47e5a98SIlpo Järvinen 
1035dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
103691fed7a1SIlpo Järvinen 		return;
103791fed7a1SIlpo Järvinen 
10386859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
103991fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
104091fed7a1SIlpo Järvinen }
104191fed7a1SIlpo Järvinen 
1042797108d1SIlpo Järvinen /* When the pcount of an skb in the middle of the write queue changes, we
1043797108d1SIlpo Järvinen  * need to make various tweaks to fix the counters.
1044797108d1SIlpo Järvinen  */
1045cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1046797108d1SIlpo Järvinen {
1047797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1048797108d1SIlpo Järvinen 
1049797108d1SIlpo Järvinen 	tp->packets_out -= decr;
1050797108d1SIlpo Järvinen 
1051797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1052797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
1053797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1054797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
1055797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1056797108d1SIlpo Järvinen 		tp->lost_out -= decr;
1057797108d1SIlpo Järvinen 
1058797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
1059797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
1060797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1061797108d1SIlpo Järvinen 
1062797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
1063797108d1SIlpo Järvinen 
1064797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
1065797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
106652cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1067797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
1068797108d1SIlpo Järvinen 
1069797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
1070797108d1SIlpo Järvinen }
1071797108d1SIlpo Järvinen 
1072490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1073490cc7d0SWillem de Bruijn {
1074490cc7d0SWillem de Bruijn 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1075490cc7d0SWillem de Bruijn 
1076490cc7d0SWillem de Bruijn 	if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
1077490cc7d0SWillem de Bruijn 	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1078490cc7d0SWillem de Bruijn 		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1079490cc7d0SWillem de Bruijn 		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1080490cc7d0SWillem de Bruijn 
1081490cc7d0SWillem de Bruijn 		shinfo->tx_flags &= ~tsflags;
1082490cc7d0SWillem de Bruijn 		shinfo2->tx_flags |= tsflags;
1083490cc7d0SWillem de Bruijn 		swap(shinfo->tskey, shinfo2->tskey);
1084490cc7d0SWillem de Bruijn 	}
1085490cc7d0SWillem de Bruijn }
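
/* Worked example (illustrative, not in the original source): suppose a
 * tx timestamp was requested with shinfo->tskey = 3000 and the skb is
 * split so that skb2 starts at seq 2460.  Then !before(3000, 2460) holds,
 * so the SKBTX_ANY_TSTAMP flags and the tskey move to skb2, the fragment
 * that actually carries byte 3000.  (hypothetical sequence numbers)
 */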
1086490cc7d0SWillem de Bruijn 
10871da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
10881da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
10891da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
10901da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
10911da177e4SLinus Torvalds  */
1092056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
10936cc55e09SOctavian Purdila 		 unsigned int mss_now, gfp_t gfp)
10941da177e4SLinus Torvalds {
10951da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10961da177e4SLinus Torvalds 	struct sk_buff *buff;
10976475be16SDavid S. Miller 	int nsize, old_factor;
1098b60b49eaSHerbert Xu 	int nlen;
10999ce01461SIlpo Järvinen 	u8 flags;
11001da177e4SLinus Torvalds 
11012fceec13SIlpo Järvinen 	if (WARN_ON(len > skb->len))
11022fceec13SIlpo Järvinen 		return -EINVAL;
11036a438bbeSStephen Hemminger 
11041da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
11051da177e4SLinus Torvalds 	if (nsize < 0)
11061da177e4SLinus Torvalds 		nsize = 0;
11071da177e4SLinus Torvalds 
11086cc55e09SOctavian Purdila 	if (skb_unclone(skb, gfp))
11091da177e4SLinus Torvalds 		return -ENOMEM;
11101da177e4SLinus Torvalds 
11111da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
11126cc55e09SOctavian Purdila 	buff = sk_stream_alloc_skb(sk, nsize, gfp);
11131da177e4SLinus Torvalds 	if (buff == NULL)
11141da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1115ef5cb973SHerbert Xu 
11163ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
11173ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1118b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1119b60b49eaSHerbert Xu 	buff->truesize += nlen;
1120b60b49eaSHerbert Xu 	skb->truesize -= nlen;
11211da177e4SLinus Torvalds 
11221da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
11231da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
11241da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
11251da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
11261da177e4SLinus Torvalds 
11271da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
11284de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
11294de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
11304de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1131e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
11321da177e4SLinus Torvalds 
113384fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
11341da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1135056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1136056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
11371da177e4SLinus Torvalds 						       nsize, 0);
11381da177e4SLinus Torvalds 
11391da177e4SLinus Torvalds 		skb_trim(skb, len);
11401da177e4SLinus Torvalds 
11411da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
11421da177e4SLinus Torvalds 	} else {
114384fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
11441da177e4SLinus Torvalds 		skb_split(skb, buff, len);
11451da177e4SLinus Torvalds 	}
11461da177e4SLinus Torvalds 
11471da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' of
11501da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
11511da177e4SLinus Torvalds 	 */
11521da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1153a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
1154490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
11551da177e4SLinus Torvalds 
11566475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
11576475be16SDavid S. Miller 
11581da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1159846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1160846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
11611da177e4SLinus Torvalds 
11626475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
11636475be16SDavid S. Miller 	 * adjust the various packet counters.
11646475be16SDavid S. Miller 	 */
1165cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
11666475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
11676475be16SDavid S. Miller 			tcp_skb_pcount(buff);
11681da177e4SLinus Torvalds 
1169797108d1SIlpo Järvinen 		if (diff)
1170797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
11711da177e4SLinus Torvalds 	}
11721da177e4SLinus Torvalds 
11731da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1174f44b5271SDavid S. Miller 	skb_header_release(buff);
1175fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
11761da177e4SLinus Torvalds 
11771da177e4SLinus Torvalds 	return 0;
11781da177e4SLinus Torvalds }
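
/* Worked example (illustrative, not part of the original file): splitting
 * an skb covering [1000, 4000) at len = 1460 leaves the original skb
 * covering [1000, 2460) and the new buff covering [2460, 4000), matching
 * the three seq/end_seq assignments above.  A minimal sketch:
 */
static inline u32 tcp_example_split_point(u32 seq, u32 len)
{
	/* The first fragment ends, and the second begins, at seq + len;
	 * the second fragment keeps the original end_seq.
	 */
	return seq + len;
}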
11791da177e4SLinus Torvalds 
11801da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
11811da177e4SLinus Torvalds  * eventually). The difference is that pulled data not copied, but
11821da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied, but
11831da177e4SLinus Torvalds  */
1184f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
11851da177e4SLinus Torvalds {
11867b7fc97aSEric Dumazet 	struct skb_shared_info *shinfo;
11871da177e4SLinus Torvalds 	int i, k, eat;
11881da177e4SLinus Torvalds 
11894fa48bf3SEric Dumazet 	eat = min_t(int, len, skb_headlen(skb));
11904fa48bf3SEric Dumazet 	if (eat) {
11914fa48bf3SEric Dumazet 		__skb_pull(skb, eat);
11924fa48bf3SEric Dumazet 		len -= eat;
11934fa48bf3SEric Dumazet 		if (!len)
11944fa48bf3SEric Dumazet 			return;
11954fa48bf3SEric Dumazet 	}
11961da177e4SLinus Torvalds 	eat = len;
11971da177e4SLinus Torvalds 	k = 0;
11987b7fc97aSEric Dumazet 	shinfo = skb_shinfo(skb);
11997b7fc97aSEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++) {
12007b7fc97aSEric Dumazet 		int size = skb_frag_size(&shinfo->frags[i]);
12019e903e08SEric Dumazet 
12029e903e08SEric Dumazet 		if (size <= eat) {
1203aff65da0SIan Campbell 			skb_frag_unref(skb, i);
12049e903e08SEric Dumazet 			eat -= size;
12051da177e4SLinus Torvalds 		} else {
12067b7fc97aSEric Dumazet 			shinfo->frags[k] = shinfo->frags[i];
12071da177e4SLinus Torvalds 			if (eat) {
12087b7fc97aSEric Dumazet 				shinfo->frags[k].page_offset += eat;
12097b7fc97aSEric Dumazet 				skb_frag_size_sub(&shinfo->frags[k], eat);
12101da177e4SLinus Torvalds 				eat = 0;
12111da177e4SLinus Torvalds 			}
12121da177e4SLinus Torvalds 			k++;
12131da177e4SLinus Torvalds 		}
12141da177e4SLinus Torvalds 	}
12157b7fc97aSEric Dumazet 	shinfo->nr_frags = k;
12161da177e4SLinus Torvalds 
121727a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
12181da177e4SLinus Torvalds 	skb->data_len -= len;
12191da177e4SLinus Torvalds 	skb->len = skb->data_len;
12201da177e4SLinus Torvalds }
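
/* Worked example (illustrative, not in the original source): trimming
 * len = 2000 bytes from an skb with a 500-byte linear head and two
 * 1000-byte page frags.  The code above first pulls all 500 linear bytes,
 * then drops the first frag whole (its size 1000 <= eat), and finally
 * advances page_offset of the second frag by the remaining 500 bytes,
 * leaving nr_frags = 1 and 500 bytes of payload.
 */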
12211da177e4SLinus Torvalds 
122267edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
12231da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
12241da177e4SLinus Torvalds {
122514bbd6a5SPravin B Shelar 	if (skb_unclone(skb, GFP_ATOMIC))
12261da177e4SLinus Torvalds 		return -ENOMEM;
12271da177e4SLinus Torvalds 
12284fa48bf3SEric Dumazet 	__pskb_trim_head(skb, len);
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
123184fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
12321da177e4SLinus Torvalds 
12331da177e4SLinus Torvalds 	skb->truesize	     -= len;
12341da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
12353ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
12361da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
12371da177e4SLinus Torvalds 
12385b35e1e6SNeal Cardwell 	/* Any change of skb->len requires recalculation of tso factor. */
12391da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
12405b35e1e6SNeal Cardwell 		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
12411da177e4SLinus Torvalds 
12421da177e4SLinus Torvalds 	return 0;
12431da177e4SLinus Torvalds }
12441da177e4SLinus Torvalds 
12451b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options.  */
12461b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
12475d424d5aSJohn Heffner {
1248cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1249cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12505d424d5aSJohn Heffner 	int mss_now;
12515d424d5aSJohn Heffner 
12525d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
12535d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
12545d424d5aSJohn Heffner 	 */
12555d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
12565d424d5aSJohn Heffner 
125767469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
125867469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
125967469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
126067469601SEric Dumazet 
126167469601SEric Dumazet 		if (dst && dst_allfrag(dst))
126267469601SEric Dumazet 			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
126367469601SEric Dumazet 	}
126467469601SEric Dumazet 
12655d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
12665d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
12675d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
12685d424d5aSJohn Heffner 
12695d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
12705d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
12715d424d5aSJohn Heffner 
12725d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
12735d424d5aSJohn Heffner 	if (mss_now < 48)
12745d424d5aSJohn Heffner 		mss_now = 48;
12755d424d5aSJohn Heffner 	return mss_now;
12765d424d5aSJohn Heffner }
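
/* Worked example (illustrative, not part of the original file): for plain
 * IPv4 with no extension headers, a PMTU of 1500 yields
 * 1500 - 20 (network header) - 20 (bare TCP header) = 1460, clamped to
 * mss_clamp and floored at 48.  A minimal sketch under those assumptions:
 */
static inline int tcp_example_ipv4_mtu_to_mss(int pmtu, int mss_clamp)
{
	int mss_now = pmtu - 20 /* iphdr */ - 20 /* tcphdr */;

	if (mss_now > mss_clamp)
		mss_now = mss_clamp;
	return mss_now < 48 ? 48 : mss_now;
}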
12775d424d5aSJohn Heffner 
12781b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.  */
12791b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
12801b63edd6SYuchung Cheng {
12811b63edd6SYuchung Cheng 	/* Subtract TCP options size, not including SACKs */
12821b63edd6SYuchung Cheng 	return __tcp_mtu_to_mss(sk, pmtu) -
12831b63edd6SYuchung Cheng 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
12841b63edd6SYuchung Cheng }
12851b63edd6SYuchung Cheng 
12865d424d5aSJohn Heffner /* Inverse of above */
128767469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
12885d424d5aSJohn Heffner {
1289cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1290cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
12915d424d5aSJohn Heffner 	int mtu;
12925d424d5aSJohn Heffner 
12935d424d5aSJohn Heffner 	mtu = mss +
12945d424d5aSJohn Heffner 	      tp->tcp_header_len +
12955d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
12965d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
12975d424d5aSJohn Heffner 
129867469601SEric Dumazet 	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
129967469601SEric Dumazet 	if (icsk->icsk_af_ops->net_frag_header_len) {
130067469601SEric Dumazet 		const struct dst_entry *dst = __sk_dst_get(sk);
130167469601SEric Dumazet 
130267469601SEric Dumazet 		if (dst && dst_allfrag(dst))
130367469601SEric Dumazet 			mtu += icsk->icsk_af_ops->net_frag_header_len;
130467469601SEric Dumazet 	}
13055d424d5aSJohn Heffner 	return mtu;
13065d424d5aSJohn Heffner }
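
/* Illustrative note (not in the original source): with zero extension
 * header lengths this is an exact inverse of the conversion above, e.g.
 * tcp_mss_to_mtu() of the 1460 derived from a 1500-byte IPv4 PMTU adds
 * back 20 + 20 and returns 1500.
 */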
13075d424d5aSJohn Heffner 
130867edfef7SAndi Kleen /* MTU probing init per socket */
13095d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
13105d424d5aSJohn Heffner {
13115d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
13125d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
13135d424d5aSJohn Heffner 
13145d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
13155d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
13165d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
13175d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
13185d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
13195d424d5aSJohn Heffner }
13204bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
13215d424d5aSJohn Heffner 
13221da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
13231da177e4SLinus Torvalds 
13241da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
13251da177e4SLinus Torvalds    account for TCP options; it includes only the bare TCP header.
13261da177e4SLinus Torvalds 
13271da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1328caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
13291da177e4SLinus Torvalds    It also does not include TCP options.
13301da177e4SLinus Torvalds 
1331d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
13341da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
13351da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
13361da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
13371da177e4SLinus Torvalds 
13381da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
13391da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
13401da177e4SLinus Torvalds 
1341d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1342d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
13431da177e4SLinus Torvalds  */
13441da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
13451da177e4SLinus Torvalds {
13461da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1347d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
13485d424d5aSJohn Heffner 	int mss_now;
13491da177e4SLinus Torvalds 
13505d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
13515d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
13521da177e4SLinus Torvalds 
13535d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1354409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	/* And store cached results */
1357d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
13585d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
13595d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1360c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
13611da177e4SLinus Torvalds 
13621da177e4SLinus Torvalds 	return mss_now;
13631da177e4SLinus Torvalds }
13644bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss);
13651da177e4SLinus Torvalds 
13661da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
13671da177e4SLinus Torvalds  * and even PMTU discovery events into account.
13681da177e4SLinus Torvalds  */
13690c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
13701da177e4SLinus Torvalds {
1371cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1372cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
1373c1b4a7e6SDavid S. Miller 	u32 mss_now;
137495c96174SEric Dumazet 	unsigned int header_len;
137533ad798cSAdam Langley 	struct tcp_out_options opts;
137633ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
13771da177e4SLinus Torvalds 
1378c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1379c1b4a7e6SDavid S. Miller 
13801da177e4SLinus Torvalds 	if (dst) {
13811da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1382d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
13831da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
13841da177e4SLinus Torvalds 	}
13851da177e4SLinus Torvalds 
138633ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
138733ad798cSAdam Langley 		     sizeof(struct tcphdr);
138833ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
138933ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
139033ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
139133ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
139233ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
139333ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
139433ad798cSAdam Langley 		mss_now -= delta;
139533ad798cSAdam Langley 	}
1396cfb6eeb4SYOSHIFUJI Hideaki 
13971da177e4SLinus Torvalds 	return mss_now;
13981da177e4SLinus Torvalds }
13991da177e4SLinus Torvalds 
140086fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
140186fd14adSWeiping Pan  * As additional protections, we do not touch cwnd in retransmission phases,
140286fd14adSWeiping Pan  * and if application hit its sndbuf limit recently.
140386fd14adSWeiping Pan  */
140486fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1405a762a980SDavid S. Miller {
14069e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1407a762a980SDavid S. Miller 
140886fd14adSWeiping Pan 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
140986fd14adSWeiping Pan 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
141086fd14adSWeiping Pan 		/* Limited by application or receiver window. */
141186fd14adSWeiping Pan 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
141286fd14adSWeiping Pan 		u32 win_used = max(tp->snd_cwnd_used, init_win);
141386fd14adSWeiping Pan 		if (win_used < tp->snd_cwnd) {
141486fd14adSWeiping Pan 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
141586fd14adSWeiping Pan 			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
141686fd14adSWeiping Pan 		}
141786fd14adSWeiping Pan 		tp->snd_cwnd_used = 0;
141886fd14adSWeiping Pan 	}
141986fd14adSWeiping Pan 	tp->snd_cwnd_stamp = tcp_time_stamp;
142086fd14adSWeiping Pan }
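
/* Worked example (illustrative, not in the original source): if
 * snd_cwnd = 40 but at most win_used = 10 packets were ever outstanding
 * during the idle RTO (assuming init_win is no larger), the window decays
 * toward actual usage: snd_cwnd = (40 + 10) >> 1 = 25, with ssthresh
 * refreshed first.  (hypothetical numbers)
 */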
142186fd14adSWeiping Pan 
1422ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1423a762a980SDavid S. Miller {
1424a762a980SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1425a762a980SDavid S. Miller 
1426ca8a2263SNeal Cardwell 	/* Track the maximum number of outstanding packets in each
1427ca8a2263SNeal Cardwell 	 * window, and remember whether we were cwnd-limited then.
1428ca8a2263SNeal Cardwell 	 */
1429ca8a2263SNeal Cardwell 	if (!before(tp->snd_una, tp->max_packets_seq) ||
1430ca8a2263SNeal Cardwell 	    tp->packets_out > tp->max_packets_out) {
1431ca8a2263SNeal Cardwell 		tp->max_packets_out = tp->packets_out;
1432ca8a2263SNeal Cardwell 		tp->max_packets_seq = tp->snd_nxt;
1433ca8a2263SNeal Cardwell 		tp->is_cwnd_limited = is_cwnd_limited;
1434ca8a2263SNeal Cardwell 	}
1435e114a710SEric Dumazet 
143624901551SEric Dumazet 	if (tcp_is_cwnd_limited(sk)) {
1437a762a980SDavid S. Miller 		/* Network is fed fully. */
1438a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1439a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1440a762a980SDavid S. Miller 	} else {
1441a762a980SDavid S. Miller 		/* Network starves. */
1442a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1443a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1444a762a980SDavid S. Miller 
144515d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
144615d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1447a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1448a762a980SDavid S. Miller 	}
1449a762a980SDavid S. Miller }
1450a762a980SDavid S. Miller 
1451d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1452d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1453d4589926SEric Dumazet {
1454d4589926SEric Dumazet 	return after(tp->snd_sml, tp->snd_una) &&
1455d4589926SEric Dumazet 		!after(tp->snd_sml, tp->snd_nxt);
1456d4589926SEric Dumazet }
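
/* Worked example (illustrative, not part of the original file): with
 * snd_una = 100, snd_sml = 150 and snd_nxt = 200, the last sub-MSS segment
 * ends at 150; it is past snd_una (not yet acked) and not past snd_nxt
 * (really sent), so the check returns true and Nagle may hold further
 * small segments.  The same test on explicit arguments:
 */
static inline bool tcp_example_minshall(u32 snd_una, u32 snd_sml, u32 snd_nxt)
{
	return after(snd_sml, snd_una) && !after(snd_sml, snd_nxt);
}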
1457d4589926SEric Dumazet 
1458d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1459d4589926SEric Dumazet  * Note that a TSO packet might end with a sub-mss segment
1460d4589926SEric Dumazet  * The test is really :
1461d4589926SEric Dumazet  * if ((skb->len % mss) != 0)
1462d4589926SEric Dumazet  *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1463d4589926SEric Dumazet  * But we can avoid doing the divide again given we already have
1464d4589926SEric Dumazet  *  skb_pcount = skb->len / mss_now
14650e3a4803SIlpo Järvinen  */
1466d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1467d4589926SEric Dumazet 				const struct sk_buff *skb)
1468d4589926SEric Dumazet {
1469d4589926SEric Dumazet 	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1470d4589926SEric Dumazet 		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1471d4589926SEric Dumazet }
1472d4589926SEric Dumazet 
1473d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules:
1474d4589926SEric Dumazet  * 1. It is full sized. (provided by caller in %partial bool)
1475d4589926SEric Dumazet  * 2. Or it contains FIN. (already checked by caller)
1476d4589926SEric Dumazet  * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1477d4589926SEric Dumazet  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1478d4589926SEric Dumazet  *    With Minshall's modification: all sent small packets are ACKed.
1479d4589926SEric Dumazet  */
1480d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1481cc93fc51SPeter Pan(潘卫平) 			    int nonagle)
1482d4589926SEric Dumazet {
1483d4589926SEric Dumazet 	return partial &&
1484d4589926SEric Dumazet 		((nonagle & TCP_NAGLE_CORK) ||
1485d4589926SEric Dumazet 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1486d4589926SEric Dumazet }
1487d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */
1488d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk,
1489d4589926SEric Dumazet 					const struct sk_buff *skb,
1490d4589926SEric Dumazet 					unsigned int mss_now,
1491d4589926SEric Dumazet 					unsigned int max_segs,
1492d4589926SEric Dumazet 					int nonagle)
1493c1b4a7e6SDavid S. Miller {
1494cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1495d4589926SEric Dumazet 	u32 partial, needed, window, max_len;
1496c1b4a7e6SDavid S. Miller 
149790840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
14981485348dSBen Hutchings 	max_len = mss_now * max_segs;
14990e3a4803SIlpo Järvinen 
15001485348dSBen Hutchings 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
15011485348dSBen Hutchings 		return max_len;
15020e3a4803SIlpo Järvinen 
15035ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
15045ea3a748SIlpo Järvinen 
15051485348dSBen Hutchings 	if (max_len <= needed)
15061485348dSBen Hutchings 		return max_len;
15070e3a4803SIlpo Järvinen 
1508d4589926SEric Dumazet 	partial = needed % mss_now;
1509d4589926SEric Dumazet 	/* If last segment is not a full MSS, check if Nagle rules allow us
1510d4589926SEric Dumazet 	 * to include this last segment in this skb.
1511d4589926SEric Dumazet 	 * Otherwise, we'll split the skb at last MSS boundary
1512d4589926SEric Dumazet 	 */
1513cc93fc51SPeter Pan(潘卫平) 	if (tcp_nagle_check(partial != 0, tp, nonagle))
1514d4589926SEric Dumazet 		return needed - partial;
1515d4589926SEric Dumazet 
1516d4589926SEric Dumazet 	return needed;
1517c1b4a7e6SDavid S. Miller }
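
/* Worked example (illustrative, not in the original source): with
 * mss_now = 1460 and max_segs = 3, max_len = 4380.  If the skb holds 5000
 * bytes but only window = 2000 bytes remain, needed = 2000 and
 * partial = 2000 % 1460 = 540; unless Nagle lets the 540-byte tail ride
 * along, the returned split point is 2000 - 540 = 1460.
 * (hypothetical numbers)
 */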
1518c1b4a7e6SDavid S. Miller 
1519c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1520c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1521c1b4a7e6SDavid S. Miller  */
1522cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1523cf533ea5SEric Dumazet 					 const struct sk_buff *skb)
1524c1b4a7e6SDavid S. Miller {
1525c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1526c1b4a7e6SDavid S. Miller 
1527c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
15284de075e0SEric Dumazet 	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
15294de075e0SEric Dumazet 	    tcp_skb_pcount(skb) == 1)
1530c1b4a7e6SDavid S. Miller 		return 1;
1531c1b4a7e6SDavid S. Miller 
1532c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1533c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1534c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1535c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1536c1b4a7e6SDavid S. Miller 
1537c1b4a7e6SDavid S. Miller 	return 0;
1538c1b4a7e6SDavid S. Miller }
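
/* Worked example (illustrative, not part of the original file): with
 * snd_cwnd = 10 and tcp_packets_in_flight() = 7, a non-FIN skb gets a
 * quota of 10 - 7 = 3 segments; with 10 already in flight the quota is 0,
 * and only a one-segment FIN would still be let through.
 */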
1539c1b4a7e6SDavid S. Miller 
1540b595076aSUwe Kleine-König /* Initialize TSO state of a skb.
154167edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1542c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1543c1b4a7e6SDavid S. Miller  */
1544cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
1545056834d9SIlpo Järvinen 			     unsigned int mss_now)
1546c1b4a7e6SDavid S. Miller {
1547c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1548c1b4a7e6SDavid S. Miller 
1549f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1550846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1551c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1552c1b4a7e6SDavid S. Miller 	}
1553c1b4a7e6SDavid S. Miller 	return tso_segs;
1554c1b4a7e6SDavid S. Miller }
1555c1b4a7e6SDavid S. Miller 
1556c1b4a7e6SDavid S. Miller 
1557a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be
1558c1b4a7e6SDavid S. Miller  * sent now.
1559c1b4a7e6SDavid S. Miller  */
1560a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1561c1b4a7e6SDavid S. Miller 				  unsigned int cur_mss, int nonagle)
1562c1b4a7e6SDavid S. Miller {
1563c1b4a7e6SDavid S. Miller 	/* The Nagle rule does not apply to frames which sit in the middle of the
1564c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1565c1b4a7e6SDavid S. Miller 	 *
1566c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1567c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1568c1b4a7e6SDavid S. Miller 	 */
1569c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1570a2a385d6SEric Dumazet 		return true;
1571c1b4a7e6SDavid S. Miller 
15729b44190dSYuchung Cheng 	/* Don't use the nagle rule for urgent data (or for the final FIN). */
15739b44190dSYuchung Cheng 	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1574a2a385d6SEric Dumazet 		return true;
1575c1b4a7e6SDavid S. Miller 
1576cc93fc51SPeter Pan(潘卫平) 	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1577a2a385d6SEric Dumazet 		return true;
1578c1b4a7e6SDavid S. Miller 
1579a2a385d6SEric Dumazet 	return false;
1580c1b4a7e6SDavid S. Miller }
1581c1b4a7e6SDavid S. Miller 
1582c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1583a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1584a2a385d6SEric Dumazet 			     const struct sk_buff *skb,
1585056834d9SIlpo Järvinen 			     unsigned int cur_mss)
1586c1b4a7e6SDavid S. Miller {
1587c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1588c1b4a7e6SDavid S. Miller 
1589c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1590c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1591c1b4a7e6SDavid S. Miller 
159290840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1593c1b4a7e6SDavid S. Miller }
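
/* Worked example (illustrative, not in the original source): for an skb
 * covering [1000, 4000) with cur_mss = 1460, only the first segment's end,
 * 1000 + 1460 = 2460, is tested: the skb passes whenever
 * tcp_wnd_end(tp) >= 2460, even though the whole skb would not fit.
 */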
1594c1b4a7e6SDavid S. Miller 
1595fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1596c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1597c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1598c1b4a7e6SDavid S. Miller  */
1599cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1600c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1601c1b4a7e6SDavid S. Miller {
1602cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1603c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1604c1b4a7e6SDavid S. Miller 
1605846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1606c1b4a7e6SDavid S. Miller 
1607c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1608c1b4a7e6SDavid S. Miller 		return 0;
1609c1b4a7e6SDavid S. Miller 
1610c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1611056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1612c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1613c1b4a7e6SDavid S. Miller 
1614c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1615c1b4a7e6SDavid S. Miller }
1616c1b4a7e6SDavid S. Miller 
161767edfef7SAndi Kleen /* Test if sending is allowed right now. */
1618a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk)
1619c1b4a7e6SDavid S. Miller {
1620cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1621fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1622c1b4a7e6SDavid S. Miller 
1623a02cec21SEric Dumazet 	return skb &&
16240c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1625c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
1626a02cec21SEric Dumazet 			      tp->nonagle : TCP_NAGLE_PUSH));
1627c1b4a7e6SDavid S. Miller }
1628c1b4a7e6SDavid S. Miller 
1629c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1630c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1631c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1632c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1633c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1634c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1635c1b4a7e6SDavid S. Miller  */
1636056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1637c4ead4c5SEric Dumazet 			unsigned int mss_now, gfp_t gfp)
1638c1b4a7e6SDavid S. Miller {
1639c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1640c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
16419ce01461SIlpo Järvinen 	u8 flags;
1642c1b4a7e6SDavid S. Miller 
1643c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1644c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
16456cc55e09SOctavian Purdila 		return tcp_fragment(sk, skb, len, mss_now, gfp);
1646c1b4a7e6SDavid S. Miller 
1647c4ead4c5SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, gfp);
1648c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1649c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1650c1b4a7e6SDavid S. Miller 
16513ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
16523ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1653b60b49eaSHerbert Xu 	buff->truesize += nlen;
1654c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1655c1b4a7e6SDavid S. Miller 
1656c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1657c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1658c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1659c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1660c1b4a7e6SDavid S. Miller 
1661c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
16624de075e0SEric Dumazet 	flags = TCP_SKB_CB(skb)->tcp_flags;
16634de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
16644de075e0SEric Dumazet 	TCP_SKB_CB(buff)->tcp_flags = flags;
1665c1b4a7e6SDavid S. Miller 
1666c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1667c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1668c1b4a7e6SDavid S. Miller 
166984fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1670c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1671490cc7d0SWillem de Bruijn 	tcp_fragment_tstamp(skb, buff);
1672c1b4a7e6SDavid S. Miller 
1673c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1674846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1675846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1676c1b4a7e6SDavid S. Miller 
1677c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1678c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1679fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1680c1b4a7e6SDavid S. Miller 
1681c1b4a7e6SDavid S. Miller 	return 0;
1682c1b4a7e6SDavid S. Miller }
1683c1b4a7e6SDavid S. Miller 
1684c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1685c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1686c1b4a7e6SDavid S. Miller  *
1687c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1688c1b4a7e6SDavid S. Miller  */
1689ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1690ca8a2263SNeal Cardwell 				 bool *is_cwnd_limited)
1691c1b4a7e6SDavid S. Miller {
16929e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
16936687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1694c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1695ad9f4f50SEric Dumazet 	int win_divisor;
1696c1b4a7e6SDavid S. Miller 
16974de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1698ae8064acSJohn Heffner 		goto send_now;
1699c1b4a7e6SDavid S. Miller 
17006687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1701ae8064acSJohn Heffner 		goto send_now;
1702ae8064acSJohn Heffner 
1703ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1704bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1705a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1706ae8064acSJohn Heffner 		goto send_now;
1707908a75c1SDavid S. Miller 
1708c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1709c1b4a7e6SDavid S. Miller 
1710056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1711c1b4a7e6SDavid S. Miller 
171290840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1713c1b4a7e6SDavid S. Miller 
1714c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1715c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1716c1b4a7e6SDavid S. Miller 
1717c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1718c1b4a7e6SDavid S. Miller 
1719ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
17201485348dSBen Hutchings 	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
172195bd09ebSEric Dumazet 			   tp->xmit_size_goal_segs * tp->mss_cache))
1722ae8064acSJohn Heffner 		goto send_now;
1723ba244fe9SDavid S. Miller 
172462ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
172562ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
172662ad2761SIlpo Järvinen 		goto send_now;
172762ad2761SIlpo Järvinen 
1728ad9f4f50SEric Dumazet 	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1729ad9f4f50SEric Dumazet 	if (win_divisor) {
1730c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1731c1b4a7e6SDavid S. Miller 
1732c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1733c1b4a7e6SDavid S. Miller 		 * just use it.
1734c1b4a7e6SDavid S. Miller 		 */
1735ad9f4f50SEric Dumazet 		chunk /= win_divisor;
1736c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1737ae8064acSJohn Heffner 			goto send_now;
1738c1b4a7e6SDavid S. Miller 	} else {
1739c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1740c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1741c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1742c1b4a7e6SDavid S. Miller 		 * then send now.
1743c1b4a7e6SDavid S. Miller 		 */
17446b5a5c0dSNeal Cardwell 		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1745ae8064acSJohn Heffner 			goto send_now;
1746c1b4a7e6SDavid S. Miller 	}
1747c1b4a7e6SDavid S. Miller 
1748f4541d60SEric Dumazet 	/* Ok, it looks like it is advisable to defer.
1749f4541d60SEric Dumazet 	 * If the timer is already set, do not rearm it, so as not to break TCP ACK clocking.
1750f4541d60SEric Dumazet 	 */
1751f4541d60SEric Dumazet 	if (!tp->tso_deferred)
1752ae8064acSJohn Heffner 		tp->tso_deferred = 1 | (jiffies << 1);
1753ae8064acSJohn Heffner 
1754ca8a2263SNeal Cardwell 	if (cong_win < send_win && cong_win < skb->len)
1755ca8a2263SNeal Cardwell 		*is_cwnd_limited = true;
1756ca8a2263SNeal Cardwell 
1757a2a385d6SEric Dumazet 	return true;
1758ae8064acSJohn Heffner 
1759ae8064acSJohn Heffner send_now:
1760ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1761a2a385d6SEric Dumazet 	return false;
1762c1b4a7e6SDavid S. Miller }
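
/* Worked example (illustrative, not part of the original file): with the
 * default tcp_tso_win_divisor of 3, snd_wnd = 65536 and
 * snd_cwnd * mss_cache = 102400, chunk = 65536 / 3 ~= 21845 bytes; once
 * limit (min of send and congestion windows) reaches that fraction, the
 * deferral ends and the skb is sent.  (hypothetical numbers)
 */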
1763c1b4a7e6SDavid S. Miller 
17645d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
176567edfef7SAndi Kleen  * MTU probing regularly attempts to increase the path MTU by
176667edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
176767edfef7SAndi Kleen  * changes resulting in larger path MTUs.
176867edfef7SAndi Kleen  *
17695d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
17705d424d5aSJohn Heffner  *         1 if a probe was sent,
1771056834d9SIlpo Järvinen  *         -1 otherwise
1772056834d9SIlpo Järvinen  */
17735d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
17745d424d5aSJohn Heffner {
17755d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
17765d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
17775d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
17785d424d5aSJohn Heffner 	int len;
17795d424d5aSJohn Heffner 	int probe_size;
178091cc17c0SIlpo Järvinen 	int size_needed;
17815d424d5aSJohn Heffner 	int copy;
17825d424d5aSJohn Heffner 	int mss_now;
17835d424d5aSJohn Heffner 
17845d424d5aSJohn Heffner 	/* Not currently probing/verifying,
17855d424d5aSJohn Heffner 	 * not in recovery,
17865d424d5aSJohn Heffner 	 * have enough cwnd, and
17875d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
17885d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
17895d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
17905d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
17915d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1792cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
17935d424d5aSJohn Heffner 		return -1;
17945d424d5aSJohn Heffner 
17955d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
17960c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
17975d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
179891cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
17995d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
18005d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
18015d424d5aSJohn Heffner 		return -1;
18025d424d5aSJohn Heffner 	}
18035d424d5aSJohn Heffner 
18045d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
18057f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
18065d424d5aSJohn Heffner 		return -1;
18075d424d5aSJohn Heffner 
180891cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
18095d424d5aSJohn Heffner 		return -1;
181090840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
18115d424d5aSJohn Heffner 		return 0;
18125d424d5aSJohn Heffner 
1813d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1814d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1815d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
18165d424d5aSJohn Heffner 			return -1;
18175d424d5aSJohn Heffner 		else
18185d424d5aSJohn Heffner 			return 0;
18195d424d5aSJohn Heffner 	}
18205d424d5aSJohn Heffner 
18215d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
18225d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
18235d424d5aSJohn Heffner 		return -1;
18243ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
18253ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
18265d424d5aSJohn Heffner 
1827fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
18285d424d5aSJohn Heffner 
18295d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
18305d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
18314de075e0SEric Dumazet 	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
18325d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
18335d424d5aSJohn Heffner 	nskb->csum = 0;
183484fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
18355d424d5aSJohn Heffner 
183650c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
183750c4817eSIlpo Järvinen 
18385d424d5aSJohn Heffner 	len = 0;
1839234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
18405d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
18415d424d5aSJohn Heffner 		if (nskb->ip_summed)
18425d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
18435d424d5aSJohn Heffner 		else
18445d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1845056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1846056834d9SIlpo Järvinen 							    copy, nskb->csum);
18475d424d5aSJohn Heffner 
18485d424d5aSJohn Heffner 		if (skb->len <= copy) {
18495d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
18505d424d5aSJohn Heffner 			 * Throw it away. */
18514de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1852fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
18533ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
18545d424d5aSJohn Heffner 		} else {
18554de075e0SEric Dumazet 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
1856a3433f35SChangli Gao 						   ~(TCPHDR_FIN|TCPHDR_PSH);
18575d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
18585d424d5aSJohn Heffner 				skb_pull(skb, copy);
185984fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1860056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1861056834d9SIlpo Järvinen 								 skb->len, 0);
18625d424d5aSJohn Heffner 			} else {
18635d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
18645d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
18655d424d5aSJohn Heffner 			}
18665d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
18675d424d5aSJohn Heffner 		}
18685d424d5aSJohn Heffner 
18695d424d5aSJohn Heffner 		len += copy;
1870234b6860SIlpo Järvinen 
1871234b6860SIlpo Järvinen 		if (len >= probe_size)
1872234b6860SIlpo Järvinen 			break;
18735d424d5aSJohn Heffner 	}
18745d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
18755d424d5aSJohn Heffner 
18765d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
18775d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
18785d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
18795d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
18805d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
18815d424d5aSJohn Heffner 		 * effectively two packets. */
18825d424d5aSJohn Heffner 		tp->snd_cwnd--;
188366f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
18845d424d5aSJohn Heffner 
18855d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
18860e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
18870e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
18885d424d5aSJohn Heffner 
18895d424d5aSJohn Heffner 		return 1;
18905d424d5aSJohn Heffner 	}
18915d424d5aSJohn Heffner 
18925d424d5aSJohn Heffner 	return -1;
18935d424d5aSJohn Heffner }
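
/* Worked example (illustrative, not in the original source): with
 * mss_cache = 1400 and reordering = 3, the probe is built with
 * probe_size = 2 * 1400 = 2800 bytes and demands
 * size_needed = 2800 + (3 + 1) * 1400 = 8400 bytes of queued data and of
 * send window before it is attempted.  (hypothetical numbers)
 */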
18945d424d5aSJohn Heffner 
18951da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
18961da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
18971da177e4SLinus Torvalds  * window for us.
18981da177e4SLinus Torvalds  *
1899f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1900f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1901f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1902f8269a49SIlpo Järvinen  *
19036ba8a3b1SNandita Dukkipati  * Send at most one packet when push_one > 0. Temporarily ignore
19046ba8a3b1SNandita Dukkipati  * cwnd limit to force at most one packet out when push_one == 2.
19056ba8a3b1SNandita Dukkipati  *
1906a2a385d6SEric Dumazet  * Returns true if no segments are in flight and we have queued segments,
1907a2a385d6SEric Dumazet  * but cannot send anything now because of SWS or another problem.
19081da177e4SLinus Torvalds  */
1909a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1910d5dd9175SIlpo Järvinen 			   int push_one, gfp_t gfp)
19111da177e4SLinus Torvalds {
19121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
191392df7b51SDavid S. Miller 	struct sk_buff *skb;
1914c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1915c1b4a7e6SDavid S. Miller 	int cwnd_quota;
19165d424d5aSJohn Heffner 	int result;
1917ca8a2263SNeal Cardwell 	bool is_cwnd_limited = false;
19181da177e4SLinus Torvalds 
1919c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
19205d424d5aSJohn Heffner 
1921d5dd9175SIlpo Järvinen 	if (!push_one) {
19225d424d5aSJohn Heffner 		/* Do MTU probing. */
1923d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1924d5dd9175SIlpo Järvinen 		if (!result) {
1925a2a385d6SEric Dumazet 			return false;
19265d424d5aSJohn Heffner 		} else if (result > 0) {
19275d424d5aSJohn Heffner 			sent_pkts = 1;
19285d424d5aSJohn Heffner 		}
1929d5dd9175SIlpo Järvinen 	}
19305d424d5aSJohn Heffner 
1931fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1932c8ac3774SHerbert Xu 		unsigned int limit;
1933c8ac3774SHerbert Xu 
1934b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1935c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1936c1b4a7e6SDavid S. Miller 
19379d186cacSAndrey Vagin 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
19389d186cacSAndrey Vagin 			/* "when" is used as a start point for the retransmit timer */
19399d186cacSAndrey Vagin 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
1940ec342325SAndrew Vagin 			goto repair; /* Skip network transmission */
19419d186cacSAndrey Vagin 		}
1942ec342325SAndrew Vagin 
1943b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
19446ba8a3b1SNandita Dukkipati 		if (!cwnd_quota) {
1945ca8a2263SNeal Cardwell 			is_cwnd_limited = true;
19466ba8a3b1SNandita Dukkipati 			if (push_one == 2)
19476ba8a3b1SNandita Dukkipati 				/* Force out a loss probe pkt. */
19486ba8a3b1SNandita Dukkipati 				cwnd_quota = 1;
19496ba8a3b1SNandita Dukkipati 			else
1950b68e9f85SHerbert Xu 				break;
19516ba8a3b1SNandita Dukkipati 		}
1952b68e9f85SHerbert Xu 
1953b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1954b68e9f85SHerbert Xu 			break;
1955b68e9f85SHerbert Xu 
1956c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1957aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1958aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1959aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1960aa93466bSDavid S. Miller 				break;
1961c1b4a7e6SDavid S. Miller 		} else {
1962ca8a2263SNeal Cardwell 			if (!push_one &&
1963ca8a2263SNeal Cardwell 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited))
1964aa93466bSDavid S. Miller 				break;
1965c1b4a7e6SDavid S. Miller 		}
1966aa93466bSDavid S. Miller 
1967c9eeec26SEric Dumazet 		/* TCP Small Queues :
1968c9eeec26SEric Dumazet 		 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1969c9eeec26SEric Dumazet 		 * This allows for :
1970c9eeec26SEric Dumazet 		 *  - better RTT estimation and ACK scheduling
1971c9eeec26SEric Dumazet 		 *  - faster recovery
1972c9eeec26SEric Dumazet 		 *  - high rates
197398e09386SEric Dumazet 		 * Alas, some drivers / subsystems require a fair amount
197498e09386SEric Dumazet 		 * of queued bytes to ensure line rate.
197598e09386SEric Dumazet 		 * One example is wifi aggregation (802.11 AMPDU)
197646d3ceabSEric Dumazet 		 */
197798e09386SEric Dumazet 		limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
197898e09386SEric Dumazet 			      sk->sk_pacing_rate >> 10);
1979c9eeec26SEric Dumazet 
1980c9eeec26SEric Dumazet 		if (atomic_read(&sk->sk_wmem_alloc) > limit) {
198146d3ceabSEric Dumazet 			set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1982bf06200eSJohn Ogness 			/* It is possible TX completion already happened
1983bf06200eSJohn Ogness 			 * before we set TSQ_THROTTLED, so we must
1984bf06200eSJohn Ogness 			 * test again the condition.
1985bf06200eSJohn Ogness 			 */
19864e857c58SPeter Zijlstra 			smp_mb__after_atomic();
1987bf06200eSJohn Ogness 			if (atomic_read(&sk->sk_wmem_alloc) > limit)
198846d3ceabSEric Dumazet 				break;
198946d3ceabSEric Dumazet 		}
1990c9eeec26SEric Dumazet 
1991c8ac3774SHerbert Xu 		limit = mss_now;
1992f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
19930e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
19941485348dSBen Hutchings 						    min_t(unsigned int,
19951485348dSBen Hutchings 							  cwnd_quota,
1996d4589926SEric Dumazet 							  sk->sk_gso_max_segs),
1997d4589926SEric Dumazet 						    nonagle);
1998c8ac3774SHerbert Xu 
1999c8ac3774SHerbert Xu 		if (skb->len > limit &&
2000c4ead4c5SEric Dumazet 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
20011da177e4SLinus Torvalds 			break;
20021da177e4SLinus Torvalds 
20031da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
2004c1b4a7e6SDavid S. Miller 
2005d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
20061da177e4SLinus Torvalds 			break;
20071da177e4SLinus Torvalds 
2008ec342325SAndrew Vagin repair:
20091da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
20101da177e4SLinus Torvalds 		 * This call will increment packets_out.
20111da177e4SLinus Torvalds 		 */
201266f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
20131da177e4SLinus Torvalds 
20141da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
2015a262f0cdSNandita Dukkipati 		sent_pkts += tcp_skb_pcount(skb);
2016d5dd9175SIlpo Järvinen 
2017d5dd9175SIlpo Järvinen 		if (push_one)
2018d5dd9175SIlpo Järvinen 			break;
20191da177e4SLinus Torvalds 	}
20201da177e4SLinus Torvalds 
2021aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
2022684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2023684bad11SYuchung Cheng 			tp->prr_out += sent_pkts;
20246ba8a3b1SNandita Dukkipati 
20256ba8a3b1SNandita Dukkipati 		/* Send one loss probe per tail loss episode. */
20266ba8a3b1SNandita Dukkipati 		if (push_one != 2)
20276ba8a3b1SNandita Dukkipati 			tcp_schedule_loss_probe(sk);
2028ca8a2263SNeal Cardwell 		tcp_cwnd_validate(sk, is_cwnd_limited);
2029a2a385d6SEric Dumazet 		return false;
20301da177e4SLinus Torvalds 	}
20316ba8a3b1SNandita Dukkipati 	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
20326ba8a3b1SNandita Dukkipati }
20336ba8a3b1SNandita Dukkipati 
20346ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk)
20356ba8a3b1SNandita Dukkipati {
20366ba8a3b1SNandita Dukkipati 	struct inet_connection_sock *icsk = inet_csk(sk);
20376ba8a3b1SNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
20386ba8a3b1SNandita Dukkipati 	u32 timeout, tlp_time_stamp, rto_time_stamp;
2039740b0f18SEric Dumazet 	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
20406ba8a3b1SNandita Dukkipati 
20416ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
20426ba8a3b1SNandita Dukkipati 		return false;
20436ba8a3b1SNandita Dukkipati 	/* No consecutive loss probes. */
20446ba8a3b1SNandita Dukkipati 	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
20456ba8a3b1SNandita Dukkipati 		tcp_rearm_rto(sk);
20466ba8a3b1SNandita Dukkipati 		return false;
20476ba8a3b1SNandita Dukkipati 	}
20486ba8a3b1SNandita Dukkipati 	/* Don't do any loss probe on a Fast Open connection before 3WHS
20496ba8a3b1SNandita Dukkipati 	 * finishes.
20506ba8a3b1SNandita Dukkipati 	 */
20516ba8a3b1SNandita Dukkipati 	if (sk->sk_state == TCP_SYN_RECV)
20526ba8a3b1SNandita Dukkipati 		return false;
20536ba8a3b1SNandita Dukkipati 
20546ba8a3b1SNandita Dukkipati 	/* TLP is only scheduled when the next timer event is the RTO. */
20556ba8a3b1SNandita Dukkipati 	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
20566ba8a3b1SNandita Dukkipati 		return false;
20576ba8a3b1SNandita Dukkipati 
20586ba8a3b1SNandita Dukkipati 	/* Schedule a loss probe in 2*RTT for SACK capable connections
20596ba8a3b1SNandita Dukkipati 	 * in Open state that are either cwnd- or application-limited.
20606ba8a3b1SNandita Dukkipati 	 */
2061740b0f18SEric Dumazet 	if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
20626ba8a3b1SNandita Dukkipati 	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
20636ba8a3b1SNandita Dukkipati 		return false;
20646ba8a3b1SNandita Dukkipati 
20656ba8a3b1SNandita Dukkipati 	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
20666ba8a3b1SNandita Dukkipati 	     tcp_send_head(sk))
20676ba8a3b1SNandita Dukkipati 		return false;
20686ba8a3b1SNandita Dukkipati 
20696ba8a3b1SNandita Dukkipati 	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
20706ba8a3b1SNandita Dukkipati 	 * for delayed ack when there's one outstanding packet.
20716ba8a3b1SNandita Dukkipati 	 */
20726ba8a3b1SNandita Dukkipati 	timeout = rtt << 1;
20736ba8a3b1SNandita Dukkipati 	if (tp->packets_out == 1)
20746ba8a3b1SNandita Dukkipati 		timeout = max_t(u32, timeout,
20756ba8a3b1SNandita Dukkipati 				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
20766ba8a3b1SNandita Dukkipati 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
20776ba8a3b1SNandita Dukkipati 
20786ba8a3b1SNandita Dukkipati 	/* If RTO is shorter, just schedule TLP in its place. */
20796ba8a3b1SNandita Dukkipati 	tlp_time_stamp = tcp_time_stamp + timeout;
20806ba8a3b1SNandita Dukkipati 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
20816ba8a3b1SNandita Dukkipati 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
20826ba8a3b1SNandita Dukkipati 		s32 delta = rto_time_stamp - tcp_time_stamp;
20836ba8a3b1SNandita Dukkipati 		if (delta > 0)
20846ba8a3b1SNandita Dukkipati 			timeout = delta;
20856ba8a3b1SNandita Dukkipati 	}
20866ba8a3b1SNandita Dukkipati 
20876ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
20886ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
20896ba8a3b1SNandita Dukkipati 	return true;
20906ba8a3b1SNandita Dukkipati }
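
/* A minimal userspace model of the probe-timeout arithmetic above, in
 * plain milliseconds instead of jiffies.  The 200 ms delayed-ACK figure
 * is an assumption standing in for TCP_DELACK_MAX; this is a sketch, not
 * the kernel implementation.
 */
static unsigned int tlp_timeout_ms_sketch(unsigned int srtt_ms,
					  unsigned int packets_out)
{
	unsigned int delack_ms = 200;		/* assumed TCP_DELACK_MAX */
	unsigned int timeout = 2 * srtt_ms;	/* base PTO: 2*RTT */

	/* With a single packet in flight, also wait out a delayed ACK. */
	if (packets_out == 1 && timeout < srtt_ms + srtt_ms / 2 + delack_ms)
		timeout = srtt_ms + srtt_ms / 2 + delack_ms;

	/* Never arm the probe timer below a 10 ms floor. */
	if (timeout < 10)
		timeout = 10;
	return timeout;
}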
20916ba8a3b1SNandita Dukkipati 
20921f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of
20931f3279aeSEric Dumazet  * a packet is still in a qdisc or driver queue.
20941f3279aeSEric Dumazet  * In this case, there is very little point in doing a retransmit!
20951f3279aeSEric Dumazet  * Note: This is called from BH context only.
20961f3279aeSEric Dumazet  */
20971f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
20981f3279aeSEric Dumazet 				    const struct sk_buff *skb)
20991f3279aeSEric Dumazet {
21001f3279aeSEric Dumazet 	const struct sk_buff *fclone = skb + 1;
21011f3279aeSEric Dumazet 
21021f3279aeSEric Dumazet 	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
21031f3279aeSEric Dumazet 		     fclone->fclone == SKB_FCLONE_CLONE)) {
21041f3279aeSEric Dumazet 		NET_INC_STATS_BH(sock_net(sk),
21051f3279aeSEric Dumazet 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
21061f3279aeSEric Dumazet 		return true;
21071f3279aeSEric Dumazet 	}
21081f3279aeSEric Dumazet 	return false;
21091f3279aeSEric Dumazet }
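
/* The skb + 1 test above relies on the fast-clone layout: an original
 * skb and its fast clone are carved from a single allocation, so the
 * clone sits immediately after the original in memory.  Conceptually
 * (field names illustrative, not the exact kernel structure):
 *
 *	struct fclone_pair_sketch {
 *		struct sk_buff orig;	marked SKB_FCLONE_ORIG
 *		struct sk_buff clone;	marked SKB_FCLONE_CLONE, i.e. skb + 1
 *	};
 */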
21101f3279aeSEric Dumazet 
21116ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else
21126ba8a3b1SNandita Dukkipati  * retransmit the last segment.
21136ba8a3b1SNandita Dukkipati  */
21146ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
21156ba8a3b1SNandita Dukkipati {
21169b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
21176ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
21186ba8a3b1SNandita Dukkipati 	int pcount;
21196ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
21206ba8a3b1SNandita Dukkipati 	int err = -1;
21216ba8a3b1SNandita Dukkipati 
21226ba8a3b1SNandita Dukkipati 	if (tcp_send_head(sk) != NULL) {
21236ba8a3b1SNandita Dukkipati 		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
21246ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21256ba8a3b1SNandita Dukkipati 	}
21266ba8a3b1SNandita Dukkipati 
21279b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
21289b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
21299b717a8dSNandita Dukkipati 		goto rearm_timer;
21309b717a8dSNandita Dukkipati 
21316ba8a3b1SNandita Dukkipati 	/* Retransmit last segment. */
21326ba8a3b1SNandita Dukkipati 	skb = tcp_write_queue_tail(sk);
21336ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb))
21346ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21356ba8a3b1SNandita Dukkipati 
21361f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
21371f3279aeSEric Dumazet 		goto rearm_timer;
21381f3279aeSEric Dumazet 
21396ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
21406ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
21416ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21426ba8a3b1SNandita Dukkipati 
21436ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
21446cc55e09SOctavian Purdila 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
21456cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
21466ba8a3b1SNandita Dukkipati 			goto rearm_timer;
21476ba8a3b1SNandita Dukkipati 		skb = tcp_write_queue_tail(sk);
21486ba8a3b1SNandita Dukkipati 	}
21496ba8a3b1SNandita Dukkipati 
21506ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
21516ba8a3b1SNandita Dukkipati 		goto rearm_timer;
21526ba8a3b1SNandita Dukkipati 
21536ba8a3b1SNandita Dukkipati 	err = __tcp_retransmit_skb(sk, skb);
21546ba8a3b1SNandita Dukkipati 
21559b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection. */
21569b717a8dSNandita Dukkipati 	if (likely(!err))
21579b717a8dSNandita Dukkipati 		tp->tlp_high_seq = tp->snd_nxt;
21589b717a8dSNandita Dukkipati 
21596ba8a3b1SNandita Dukkipati rearm_timer:
21606ba8a3b1SNandita Dukkipati 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
21616ba8a3b1SNandita Dukkipati 				  inet_csk(sk)->icsk_rto,
21626ba8a3b1SNandita Dukkipati 				  TCP_RTO_MAX);
21636ba8a3b1SNandita Dukkipati 
21646ba8a3b1SNandita Dukkipati 	if (likely(!err))
21656ba8a3b1SNandita Dukkipati 		NET_INC_STATS_BH(sock_net(sk),
21666ba8a3b1SNandita Dukkipati 				 LINUX_MIB_TCPLOSSPROBES);
21671da177e4SLinus Torvalds }
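
/* Worked example of the tail fragmentation above (figures illustrative):
 * with mss = 1448 and a tail skb of pcount = 3 (4344 bytes),
 * tcp_fragment() splits at (pcount - 1) * mss = 2896 and the probe
 * retransmits only the final single-MSS piece.
 */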
21681da177e4SLinus Torvalds 
2169a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
2170a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
2171a762a980SDavid S. Miller  * The socket must be locked by the caller.
2172a762a980SDavid S. Miller  */
21739e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
21749e412ba7SIlpo Järvinen 			       int nonagle)
2175a762a980SDavid S. Miller {
2176726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
2177726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
2178726e07a8SIlpo Järvinen 	 * all will be happy.
2179726e07a8SIlpo Järvinen 	 */
2180726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
2181726e07a8SIlpo Järvinen 		return;
2182726e07a8SIlpo Järvinen 
218399a1dec7SMel Gorman 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
218499a1dec7SMel Gorman 			   sk_gfp_atomic(sk, GFP_ATOMIC)))
21859e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
2186a762a980SDavid S. Miller }
2187a762a980SDavid S. Miller 
2188c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head. This function does not
2189c1b4a7e6SDavid S. Miller  * set up the probe timer etc.; use __tcp_push_pending_frames() for that.
2190c1b4a7e6SDavid S. Miller  */
2191c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
2192c1b4a7e6SDavid S. Miller {
2193fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
2194c1b4a7e6SDavid S. Miller 
2195c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
2196c1b4a7e6SDavid S. Miller 
2197d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2198c1b4a7e6SDavid S. Miller }
2199c1b4a7e6SDavid S. Miller 
22001da177e4SLinus Torvalds /* This function returns the amount that we can raise the
22011da177e4SLinus Torvalds  * usable window based on the following constraints
22021da177e4SLinus Torvalds  *
22031da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
22041da177e4SLinus Torvalds  * 2. We limit memory per socket
22051da177e4SLinus Torvalds  *
22061da177e4SLinus Torvalds  * RFC 1122:
22071da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
22081da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
22091da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
22101da177e4SLinus Torvalds  *
22111da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
22121da177e4SLinus Torvalds  * it at least MSS bytes.
22131da177e4SLinus Torvalds  *
22141da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
22151da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
22161da177e4SLinus Torvalds  *
22171da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
22181da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
22191da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
22201da177e4SLinus Torvalds  * window to always advance by a single byte.
22211da177e4SLinus Torvalds  *
22221da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
22231da177e4SLinus Torvalds  * then this will not be a problem.
22241da177e4SLinus Torvalds  *
22251da177e4SLinus Torvalds  * BSD seems to make the following compromise:
22261da177e4SLinus Torvalds  *
22271da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
22281da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
22291da177e4SLinus Torvalds  *	then set the window to 0.
22301da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
22311da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
22321da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
22331da177e4SLinus Torvalds  *
22341da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
22351da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
22361da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
22371da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
22381da177e4SLinus Torvalds  * because the pipeline is full.
22391da177e4SLinus Torvalds  *
22401da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
22411da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
22421da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
22431da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
22441da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
22451da177e4SLinus Torvalds  *
22461da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
22471da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
22481da177e4SLinus Torvalds  *
22491da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
22501da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
22511da177e4SLinus Torvalds  */
22521da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
22531da177e4SLinus Torvalds {
2254463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
22551da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2256caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
22571da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
22581da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
22591da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
22601da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
22611da177e4SLinus Torvalds 	 */
2262463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
22631da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
226486c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
226586c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
22661da177e4SLinus Torvalds 	int window;
22671da177e4SLinus Torvalds 
22681da177e4SLinus Torvalds 	if (mss > full_space)
22691da177e4SLinus Torvalds 		mss = full_space;
22701da177e4SLinus Torvalds 
2271b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2272463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
22731da177e4SLinus Torvalds 
2274180d8cd9SGlauber Costa 		if (sk_under_memory_pressure(sk))
2275056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2276056834d9SIlpo Järvinen 					       4U * tp->advmss);
22771da177e4SLinus Torvalds 
227886c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
227986c1a045SFlorian Westphal 		 * increase it due to wscale.
228086c1a045SFlorian Westphal 		 */
228186c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
228286c1a045SFlorian Westphal 
228386c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
228486c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
228586c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
228686c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
228786c1a045SFlorian Westphal 		 * With a large window, the mss test alone triggers way too
228886c1a045SFlorian Westphal 		 * late to announce a zero window before the rmem limit kicks in.
228986c1a045SFlorian Westphal 		 */
229086c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
22911da177e4SLinus Torvalds 			return 0;
22921da177e4SLinus Torvalds 	}
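
	/* Example of the 1/16th rule above (figures illustrative): with
	 * allowed_space = 64 KB the zero-window threshold is 4 KB, so a
	 * slow reader sees the window close well before incoming data
	 * would have to be dropped on rmem limits.
	 */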
22931da177e4SLinus Torvalds 
22941da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
22951da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
22961da177e4SLinus Torvalds 
22971da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
22981da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
22991da177e4SLinus Torvalds 	 */
23001da177e4SLinus Torvalds 	window = tp->rcv_wnd;
23011da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
23021da177e4SLinus Torvalds 		window = free_space;
23031da177e4SLinus Torvalds 
23041da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
23051da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
23061da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
23071da177e4SLinus Torvalds 		 */
23081da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
23091da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
23101da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
23111da177e4SLinus Torvalds 	} else {
23121da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
23131da177e4SLinus Torvalds 		 * Window clamp already applied above.
23141da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
23151da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
23161da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
23171da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
23181da177e4SLinus Torvalds 		 * is too small.
23191da177e4SLinus Torvalds 		 */
23201da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
23211da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
232284565070SJohn Heffner 		else if (mss == full_space &&
2323b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
232484565070SJohn Heffner 			window = free_space;
23251da177e4SLinus Torvalds 	}
23261da177e4SLinus Torvalds 
23271da177e4SLinus Torvalds 	return window;
23281da177e4SLinus Torvalds }
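
/* A self-contained sketch of the no-wscale rounding policy above
 * (ignoring the mss == full_space special case): keep the current
 * window when it already sits within one MSS of free space, else
 * round free space down to an MSS multiple.
 */
static int rounded_window_sketch(int window, int free_space, int mss)
{
	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;
	return window;
}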
23291da177e4SLinus Torvalds 
23304a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
23314a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
23321da177e4SLinus Torvalds {
23331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2334fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2335058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
23361da177e4SLinus Torvalds 
2337058dc334SIlpo Järvinen 	skb_size = skb->len;
2338058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
23391da177e4SLinus Torvalds 
2340058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
23411da177e4SLinus Torvalds 
23426859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
2343a6963a6bSIlpo Järvinen 
2344fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
23451da177e4SLinus Torvalds 
2346058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
23471a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
23481da177e4SLinus Torvalds 
234952d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
235052d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
23511da177e4SLinus Torvalds 
235284fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
23531da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
23541da177e4SLinus Torvalds 
23551da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
23561da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
23571da177e4SLinus Torvalds 
2358e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
23594de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
23601da177e4SLinus Torvalds 
23611da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
23621da177e4SLinus Torvalds 	 * packet counting does not break.
23631da177e4SLinus Torvalds 	 */
23644828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2365b7689205SIlpo Järvinen 
2366b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2367ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2368ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2369ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2370b7689205SIlpo Järvinen 
2371797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2372797108d1SIlpo Järvinen 
23733ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
23741da177e4SLinus Torvalds }
23751da177e4SLinus Torvalds 
237667edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2377a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
23784a17fc3aSIlpo Järvinen {
23794a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2380a2a385d6SEric Dumazet 		return false;
23814a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
23824a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
2383a2a385d6SEric Dumazet 		return false;
23844a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2385a2a385d6SEric Dumazet 		return false;
23864a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
2387a2a385d6SEric Dumazet 		return false;
23884a17fc3aSIlpo Järvinen 	/* Some heuristics for collapsing over SACK'd data could be invented */
23894a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2390a2a385d6SEric Dumazet 		return false;
23914a17fc3aSIlpo Järvinen 
2392a2a385d6SEric Dumazet 	return true;
23934a17fc3aSIlpo Järvinen }
23944a17fc3aSIlpo Järvinen 
239567edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
239667edfef7SAndi Kleen  * fewer packets on the wire. This is only done on retransmission.
239767edfef7SAndi Kleen  */
23984a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
23994a17fc3aSIlpo Järvinen 				     int space)
24004a17fc3aSIlpo Järvinen {
24014a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
24024a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2403a2a385d6SEric Dumazet 	bool first = true;
24044a17fc3aSIlpo Järvinen 
24054a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
24064a17fc3aSIlpo Järvinen 		return;
24074de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
24084a17fc3aSIlpo Järvinen 		return;
24094a17fc3aSIlpo Järvinen 
24104a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
24114a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
24124a17fc3aSIlpo Järvinen 			break;
24134a17fc3aSIlpo Järvinen 
24144a17fc3aSIlpo Järvinen 		space -= skb->len;
24154a17fc3aSIlpo Järvinen 
24164a17fc3aSIlpo Järvinen 		if (first) {
2417a2a385d6SEric Dumazet 			first = false;
24184a17fc3aSIlpo Järvinen 			continue;
24194a17fc3aSIlpo Järvinen 		}
24204a17fc3aSIlpo Järvinen 
24214a17fc3aSIlpo Järvinen 		if (space < 0)
24224a17fc3aSIlpo Järvinen 			break;
24234a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
24244a17fc3aSIlpo Järvinen 		 * the data in the second
24254a17fc3aSIlpo Järvinen 		 */
2426a21d4572SEric Dumazet 		if (skb->len > skb_availroom(to))
24274a17fc3aSIlpo Järvinen 			break;
24284a17fc3aSIlpo Järvinen 
24294a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
24304a17fc3aSIlpo Järvinen 			break;
24314a17fc3aSIlpo Järvinen 
24324a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
24334a17fc3aSIlpo Järvinen 	}
24344a17fc3aSIlpo Järvinen }
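
/* Example of the space accounting above (sizes illustrative): collapsing
 * at space = cur_mss = 1448, a 400-byte head skb may absorb following
 * 300- and 500-byte skbs (400 + 300 + 500 <= 1448), while a further
 * 600-byte skb would drive the budget negative and stop the loop.
 */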
24354a17fc3aSIlpo Järvinen 
24361da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
24371da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
24381da177e4SLinus Torvalds  * error occurred which prevented the send.
24391da177e4SLinus Torvalds  */
244093b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
24411da177e4SLinus Torvalds {
24421da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24435d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
24447d227cd2SSridhar Samudrala 	unsigned int cur_mss;
2445c84a5711SYuchung Cheng 	int err;
24461da177e4SLinus Torvalds 
24475d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
24485d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
24495d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
24505d424d5aSJohn Heffner 	}
24515d424d5aSJohn Heffner 
24521da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2453caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
24541da177e4SLinus Torvalds 	 */
24551da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
24561da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
24571da177e4SLinus Torvalds 		return -EAGAIN;
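
	/* Example of the 1/4 headroom above (figures illustrative): with
	 * sk_wmem_queued at 100 KB, retransmits are allowed until
	 * sk_wmem_alloc exceeds 125 KB (or sk_sndbuf, if smaller).
	 */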
24581da177e4SLinus Torvalds 
24591f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
24601f3279aeSEric Dumazet 		return -EBUSY;
24611f3279aeSEric Dumazet 
24621da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
24631da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
24641da177e4SLinus Torvalds 			BUG();
24651da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
24661da177e4SLinus Torvalds 			return -ENOMEM;
24671da177e4SLinus Torvalds 	}
24681da177e4SLinus Torvalds 
24697d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
24707d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
24717d227cd2SSridhar Samudrala 
24720c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
24737d227cd2SSridhar Samudrala 
24741da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
24751da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is
24761da177e4SLinus Torvalds 	 * when the window is shrunk to zero, in which case
24771da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
24781da177e4SLinus Torvalds 	 */
24799d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
24809d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
24811da177e4SLinus Torvalds 		return -EAGAIN;
24821da177e4SLinus Torvalds 
24831da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
24846cc55e09SOctavian Purdila 		if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
24851da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
248602276f3cSIlpo Järvinen 	} else {
24879eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
24889eb9362eSIlpo Järvinen 
24899eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
2490c52e2421SEric Dumazet 			if (skb_unclone(skb, GFP_ATOMIC))
2491c52e2421SEric Dumazet 				return -ENOMEM;
249202276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
24939eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
24949eb9362eSIlpo Järvinen 		}
24951da177e4SLinus Torvalds 	}
24961da177e4SLinus Torvalds 
24971da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
24981da177e4SLinus Torvalds 
24991da177e4SLinus Torvalds 	/* Make a copy if the first-transmission SKB clone we made
25001da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
25011da177e4SLinus Torvalds 	 */
25021da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
25031da177e4SLinus Torvalds 
250450bceae9SThomas Graf 	/* make sure skb->data is aligned on arches that require it
250550bceae9SThomas Graf 	 * and check if ack-trimming & collapsing extended the headroom
250650bceae9SThomas Graf 	 * beyond what csum_start can cover.
250750bceae9SThomas Graf 	 */
250850bceae9SThomas Graf 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
250950bceae9SThomas Graf 		     skb_headroom(skb) >= 0xFFFF)) {
2510117632e6SEric Dumazet 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2511117632e6SEric Dumazet 						   GFP_ATOMIC);
2512c84a5711SYuchung Cheng 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2513117632e6SEric Dumazet 			     -ENOBUFS;
2514117632e6SEric Dumazet 	} else {
2515c84a5711SYuchung Cheng 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2516117632e6SEric Dumazet 	}
2517c84a5711SYuchung Cheng 
2518fc9f3501SEric Dumazet 	if (likely(!err)) {
2519c84a5711SYuchung Cheng 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2520fc9f3501SEric Dumazet 		/* Update global TCP statistics. */
2521fc9f3501SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2522fc9f3501SEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2523fc9f3501SEric Dumazet 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2524fc9f3501SEric Dumazet 		tp->total_retrans++;
2525fc9f3501SEric Dumazet 	}
2526c84a5711SYuchung Cheng 	return err;
252793b174adSYuchung Cheng }
252893b174adSYuchung Cheng 
252993b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
253093b174adSYuchung Cheng {
253193b174adSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
253293b174adSYuchung Cheng 	int err = __tcp_retransmit_skb(sk, skb);
25331da177e4SLinus Torvalds 
25341da177e4SLinus Torvalds 	if (err == 0) {
25351da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
25361da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2537e87cc472SJoe Perches 			net_dbg_ratelimited("retrans_out leaked\n");
25381da177e4SLinus Torvalds 		}
25391da177e4SLinus Torvalds #endif
2540b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2541b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
25421da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
25431da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
25441da177e4SLinus Torvalds 
25451da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
25461da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
25471da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
25481da177e4SLinus Torvalds 
25491da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
25501da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
25511da177e4SLinus Torvalds 		 */
25521da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
25531f3279aeSEric Dumazet 	} else if (err != -EBUSY) {
255424ab6becSYuchung Cheng 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
25551da177e4SLinus Torvalds 	}
25566e08d5e3SYuchung Cheng 
25576e08d5e3SYuchung Cheng 	if (tp->undo_retrans < 0)
25586e08d5e3SYuchung Cheng 		tp->undo_retrans = 0;
25596e08d5e3SYuchung Cheng 	tp->undo_retrans += tcp_skb_pcount(skb);
25601da177e4SLinus Torvalds 	return err;
25611da177e4SLinus Torvalds }
25621da177e4SLinus Torvalds 
256367edfef7SAndi Kleen /* Check if forward retransmits are possible in the current
256467edfef7SAndi Kleen  * window/congestion state.
256567edfef7SAndi Kleen  */
2566a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2567b5afe7bcSIlpo Järvinen {
2568b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2569cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
2570b5afe7bcSIlpo Järvinen 
2571b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2572b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2573a2a385d6SEric Dumazet 		return false;
2574b5afe7bcSIlpo Järvinen 
2575b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2576b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2577a2a385d6SEric Dumazet 		return false;
2578b5afe7bcSIlpo Järvinen 
2579b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make a difficult choice between forward transmission
2580b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2581b5afe7bcSIlpo Järvinen 	 *
2582b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2583b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2584b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2585b5afe7bcSIlpo Järvinen 	 */
2586b5afe7bcSIlpo Järvinen 
2587b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2588a2a385d6SEric Dumazet 		return false;
2589b5afe7bcSIlpo Järvinen 
2590a2a385d6SEric Dumazet 	return true;
2591b5afe7bcSIlpo Järvinen }
2592b5afe7bcSIlpo Järvinen 
25931da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
25941da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
25951da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
25961da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
25971da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
25981da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
25991da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
26001da177e4SLinus Torvalds  */
26011da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
26021da177e4SLinus Torvalds {
26036687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
26041da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
26051da177e4SLinus Torvalds 	struct sk_buff *skb;
26060e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2607618d9f25SIlpo Järvinen 	u32 last_lost;
260861eb55f4SIlpo Järvinen 	int mib_idx;
26090e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
26106a438bbeSStephen Hemminger 
261145e77d31SIlpo Järvinen 	if (!tp->packets_out)
261245e77d31SIlpo Järvinen 		return;
261345e77d31SIlpo Järvinen 
261408ebd172SIlpo Järvinen 	if (!tp->lost_out)
261508ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
261608ebd172SIlpo Järvinen 
2617618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
26186a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2619618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2620618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2621618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2622618d9f25SIlpo Järvinen 	} else {
2623fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2624618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2625618d9f25SIlpo Järvinen 	}
26261da177e4SLinus Torvalds 
2627fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
26281da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
26291da177e4SLinus Torvalds 
2630fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2631fe067e8aSDavid S. Miller 			break;
26326a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
26330e1c54c2SIlpo Järvinen 		if (hole == NULL)
26346a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
26356a438bbeSStephen Hemminger 
26361da177e4SLinus Torvalds 		/* Assume this retransmit will generate
26371da177e4SLinus Torvalds 		 * only one packet for congestion window
26381da177e4SLinus Torvalds 		 * calculation purposes.  This works because
26391da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
26401da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
26411da177e4SLinus Torvalds 		 * packet counting works out.
26421da177e4SLinus Torvalds 		 */
26431da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
26441da177e4SLinus Torvalds 			return;
26450e1c54c2SIlpo Järvinen 
26460e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
26470e1c54c2SIlpo Järvinen begin_fwd:
26480e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2649006f582cSIlpo Järvinen 				break;
26500e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
26510e1c54c2SIlpo Järvinen 
26520e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2653618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
26540e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
26550e1c54c2SIlpo Järvinen 				break;
26560e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
26570e1c54c2SIlpo Järvinen 			if (hole != NULL) {
26580e1c54c2SIlpo Järvinen 				skb = hole;
26590e1c54c2SIlpo Järvinen 				hole = NULL;
26600e1c54c2SIlpo Järvinen 			}
26610e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
26620e1c54c2SIlpo Järvinen 			goto begin_fwd;
26630e1c54c2SIlpo Järvinen 
26640e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2665ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
26660e1c54c2SIlpo Järvinen 				hole = skb;
266761eb55f4SIlpo Järvinen 			continue;
26681da177e4SLinus Torvalds 
26690e1c54c2SIlpo Järvinen 		} else {
2670618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
26710e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
26720e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
26730e1c54c2SIlpo Järvinen 			else
26740e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
26750e1c54c2SIlpo Järvinen 		}
26760e1c54c2SIlpo Järvinen 
26770e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
267861eb55f4SIlpo Järvinen 			continue;
267940b215e5SPavel Emelyanov 
268024ab6becSYuchung Cheng 		if (tcp_retransmit_skb(sk, skb))
26811da177e4SLinus Torvalds 			return;
268224ab6becSYuchung Cheng 
2683de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
26841da177e4SLinus Torvalds 
2685684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
2686a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
2687a262f0cdSNandita Dukkipati 
2688fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2689463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
26903f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
26913f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
26921da177e4SLinus Torvalds 	}
26931da177e4SLinus Torvalds }
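
/* Rough sketch of how the walk above classifies each skb (control-flow
 * summary only, no new logic):
 *
 *	fwd_rexmitting set:     forward-retransmit up to the highest SACK
 *	seq >= retransmit_high: try switching to forward mode (RFC3517
 *	                        NextSeg rule 3), backtracking to any hole
 *	not marked LOST:        remember the first such hole, keep scanning
 *	otherwise:              retransmit the lost skb (fast or slow start
 *	                        MIB counter depending on CA state)
 */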
26941da177e4SLinus Torvalds 
26951da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This cannot be
26961da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
26971da177e4SLinus Torvalds  */
26981da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
26991da177e4SLinus Torvalds {
27001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2701fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
27021da177e4SLinus Torvalds 	int mss_now;
27031da177e4SLinus Torvalds 
27041da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
27051da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
27061da177e4SLinus Torvalds 	 * and IP options.
27071da177e4SLinus Torvalds 	 */
27080c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
27091da177e4SLinus Torvalds 
2710fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
27114de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
27121da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
27131da177e4SLinus Torvalds 		tp->write_seq++;
27141da177e4SLinus Torvalds 	} else {
27151da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
27161da177e4SLinus Torvalds 		for (;;) {
2717aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2718aa133076SWu Fengguang 					       sk->sk_allocation);
27191da177e4SLinus Torvalds 			if (skb)
27201da177e4SLinus Torvalds 				break;
27211da177e4SLinus Torvalds 			yield();
27221da177e4SLinus Torvalds 		}
27231da177e4SLinus Torvalds 
27241da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
27251da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
27261da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2727e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2728a3433f35SChangli Gao 				     TCPHDR_ACK | TCPHDR_FIN);
27291da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
27301da177e4SLinus Torvalds 	}
27319e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
27321da177e4SLinus Torvalds }
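
/* Like SYN, a FIN consumes one sequence number: tacking it onto the tail
 * skb above bumps both end_seq and write_seq by exactly one, e.g. a tail
 * skb with end_seq 5000 becomes 5001, the extra unit being the FIN
 * itself (example figure).
 */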
27331da177e4SLinus Torvalds 
27341da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
27351da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
27361da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
273765bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
27381da177e4SLinus Torvalds  */
2739dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
27401da177e4SLinus Torvalds {
27411da177e4SLinus Torvalds 	struct sk_buff *skb;
27421da177e4SLinus Torvalds 
27431da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
27441da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
27451da177e4SLinus Torvalds 	if (!skb) {
27464e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
27471da177e4SLinus Torvalds 		return;
27481da177e4SLinus Torvalds 	}
27491da177e4SLinus Torvalds 
27501da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
27511da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2752e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2753a3433f35SChangli Gao 			     TCPHDR_ACK | TCPHDR_RST);
27541da177e4SLinus Torvalds 	/* Send it off. */
27551da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2756dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
27574e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
275826af65cbSSridhar Samudrala 
275981cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
27601da177e4SLinus Torvalds }
27611da177e4SLinus Torvalds 
276267edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
276367edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
27641da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
27651da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
27661da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
27671da177e4SLinus Torvalds  */
27681da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
27691da177e4SLinus Torvalds {
27701da177e4SLinus Torvalds 	struct sk_buff *skb;
27711da177e4SLinus Torvalds 
2772fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
27734de075e0SEric Dumazet 	if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
277491df42beSJoe Perches 		pr_debug("%s: wrong queue state\n", __func__);
27751da177e4SLinus Torvalds 		return -EFAULT;
27761da177e4SLinus Torvalds 	}
27774de075e0SEric Dumazet 	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
27781da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
27791da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
27801da177e4SLinus Torvalds 			if (nskb == NULL)
27811da177e4SLinus Torvalds 				return -ENOMEM;
2782fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
27831da177e4SLinus Torvalds 			skb_header_release(nskb);
2784fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
27853ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
27863ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
27873ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
27881da177e4SLinus Torvalds 			skb = nskb;
27891da177e4SLinus Torvalds 		}
27901da177e4SLinus Torvalds 
27914de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
27921da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
27931da177e4SLinus Torvalds 	}
27941da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2795dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
27961da177e4SLinus Torvalds }
27971da177e4SLinus Torvalds 
27984aea39c1SEric Dumazet /**
27994aea39c1SEric Dumazet  * tcp_make_synack - Prepare a SYN-ACK.
28004aea39c1SEric Dumazet  * @sk: listener socket
28014aea39c1SEric Dumazet  * @dst: dst entry attached to the SYNACK
28024aea39c1SEric Dumazet  * @req: request_sock pointer
28034aea39c1SEric Dumazet  *
28044aea39c1SEric Dumazet  * Allocate one skb and build a SYNACK packet.
28054aea39c1SEric Dumazet  * @dst is consumed : Caller should not use it again.
28064aea39c1SEric Dumazet  */
28071da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2808e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
28098336886fSJerry Chu 				struct tcp_fastopen_cookie *foc)
28101da177e4SLinus Torvalds {
2811bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
28122e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
28131da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
28141da177e4SLinus Torvalds 	struct tcphdr *th;
28151da177e4SLinus Torvalds 	struct sk_buff *skb;
2816cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2817bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2818f5fff5dcSTom Quetchenbach 	int mss;
28191da177e4SLinus Torvalds 
2820a0b8486cSEric Dumazet 	skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
28214aea39c1SEric Dumazet 	if (unlikely(!skb)) {
28224aea39c1SEric Dumazet 		dst_release(dst);
28231da177e4SLinus Torvalds 		return NULL;
28244aea39c1SEric Dumazet 	}
28251da177e4SLinus Torvalds 	/* Reserve space for headers. */
28261da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
28271da177e4SLinus Torvalds 
28284aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
2829ca10b9e9SEric Dumazet 	security_skb_owned_by(skb, sk);
28301da177e4SLinus Torvalds 
28310dbaee3bSDavid S. Miller 	mss = dst_metric_advmss(dst);
2832f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2833f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2834f5fff5dcSTom Quetchenbach 
283533ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
28368b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
28378b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
28388b5f12d0SFlorian Westphal 		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
28398b5f12d0SFlorian Westphal 	else
28408b5f12d0SFlorian Westphal #endif
284133ad798cSAdam Langley 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
28421a2c6181SChristoph Paasch 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
28431a2c6181SChristoph Paasch 					     foc) + sizeof(*th);
284433ad798cSAdam Langley 
2845aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2846aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
28471da177e4SLinus Torvalds 
2848aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
28491da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
28501da177e4SLinus Torvalds 	th->syn = 1;
28511da177e4SLinus Torvalds 	th->ack = 1;
28521da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
2853b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
2854634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
2855e870a8efSIlpo Järvinen 	/* Setting of flags is superfluous here for callers (and ECE is
2856e870a8efSIlpo Järvinen 	 * not even correctly set)
2857e870a8efSIlpo Järvinen 	 */
2858e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2859a3433f35SChangli Gao 			     TCPHDR_SYN | TCPHDR_ACK);
28604957faadSWilliam Allen Simpson 
28611da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
28628336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
28638336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
28641da177e4SLinus Torvalds 
28651da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2866600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2867bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
28681da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
2869a0b8486cSEric Dumazet 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
2870cfb6eeb4SYOSHIFUJI Hideaki 
2871cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2872cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2873cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2874bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
287549a72dfbSAdam Langley 					       md5, NULL, req, skb);
2876cfb6eeb4SYOSHIFUJI Hideaki 	}
2877cfb6eeb4SYOSHIFUJI Hideaki #endif
2878cfb6eeb4SYOSHIFUJI Hideaki 
28791da177e4SLinus Torvalds 	return skb;
28801da177e4SLinus Torvalds }
28814bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
28821da177e4SLinus Torvalds 
288367edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
2884f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
28851da177e4SLinus Torvalds {
2886cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
28871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
28881da177e4SLinus Torvalds 	__u8 rcv_wscale;
28891da177e4SLinus Torvalds 
28901da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
28911da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
28921da177e4SLinus Torvalds 	 */
28931da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2894bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
28951da177e4SLinus Torvalds 
2896cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2897cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2898cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2899cfb6eeb4SYOSHIFUJI Hideaki #endif
2900cfb6eeb4SYOSHIFUJI Hideaki 
29011da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
29021da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
29031da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
29041da177e4SLinus Torvalds 	tp->max_window = 0;
29055d424d5aSJohn Heffner 	tcp_mtup_init(sk);
29061da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
29071da177e4SLinus Torvalds 
29081da177e4SLinus Torvalds 	if (!tp->window_clamp)
29091da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
29100dbaee3bSDavid S. Miller 	tp->advmss = dst_metric_advmss(dst);
2911f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2912f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2913f5fff5dcSTom Quetchenbach 
29141da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
29151da177e4SLinus Torvalds 
2916e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
2917e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2918e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2919e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
2920e88c64f0SHagen Paul Pfeifer 
29211da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
29221da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
29231da177e4SLinus Torvalds 				  &tp->rcv_wnd,
29241da177e4SLinus Torvalds 				  &tp->window_clamp,
2925bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
292631d12926Slaurent chavey 				  &rcv_wscale,
292731d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
29281da177e4SLinus Torvalds 
29291da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
29301da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
29311da177e4SLinus Torvalds 
29321da177e4SLinus Torvalds 	sk->sk_err = 0;
29331da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
29341da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2935ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
29361da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
29371da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
293833f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
2939370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
2940ee995283SPavel Emelyanov 
2941ee995283SPavel Emelyanov 	if (likely(!tp->repair))
29421da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
2943c7781a6eSAndrew Vagin 	else
2944c7781a6eSAndrew Vagin 		tp->rcv_tstamp = tcp_time_stamp;
2945ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
2946ee995283SPavel Emelyanov 	tp->copied_seq = tp->rcv_nxt;
29471da177e4SLinus Torvalds 
2948463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2949463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
29501da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
29511da177e4SLinus Torvalds }
29521da177e4SLinus Torvalds 
2953783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2954783237e8SYuchung Cheng {
2955783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2956783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2957783237e8SYuchung Cheng 
2958783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
2959783237e8SYuchung Cheng 	skb_header_release(skb);
2960783237e8SYuchung Cheng 	__tcp_add_write_queue_tail(sk, skb);
2961783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
2962783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
2963783237e8SYuchung Cheng 	tp->write_seq = tcb->end_seq;
2964783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
2965783237e8SYuchung Cheng }
2966783237e8SYuchung Cheng 
2967783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2968783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
2969783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2970783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
2971783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to sending a
2972783237e8SYuchung Cheng  * regular SYN with Fast Open cookie request option.
2973783237e8SYuchung Cheng  */
2974783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2975783237e8SYuchung Cheng {
2976783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
2977783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
2978aab48743SYuchung Cheng 	int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2979783237e8SYuchung Cheng 	struct sk_buff *syn_data = NULL, *data;
2980aab48743SYuchung Cheng 	unsigned long last_syn_loss = 0;
2981783237e8SYuchung Cheng 
298267da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
2983aab48743SYuchung Cheng 	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2984aab48743SYuchung Cheng 			       &syn_loss, &last_syn_loss);
2985aab48743SYuchung Cheng 	/* Recurring FO SYN losses: revert to regular handshake temporarily */
2986aab48743SYuchung Cheng 	if (syn_loss > 1 &&
2987aab48743SYuchung Cheng 	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2988aab48743SYuchung Cheng 		fo->cookie.len = -1;
2989aab48743SYuchung Cheng 		goto fallback;
2990aab48743SYuchung Cheng 	}
2991aab48743SYuchung Cheng 
299267da22d2SYuchung Cheng 	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
299367da22d2SYuchung Cheng 		fo->cookie.len = -1;
299467da22d2SYuchung Cheng 	else if (fo->cookie.len <= 0)
2995783237e8SYuchung Cheng 		goto fallback;
2996783237e8SYuchung Cheng 
2997783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2998783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
2999783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3000783237e8SYuchung Cheng 	 */
3001783237e8SYuchung Cheng 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
3002783237e8SYuchung Cheng 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
30031b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3004783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3005783237e8SYuchung Cheng 
3006f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3007f5ddcbbbSEric Dumazet 
3008f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3009f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3010f5ddcbbbSEric Dumazet 
3011f5ddcbbbSEric Dumazet 	syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
3012783237e8SYuchung Cheng 				   sk->sk_allocation);
3013783237e8SYuchung Cheng 	if (syn_data == NULL)
3014783237e8SYuchung Cheng 		goto fallback;
3015783237e8SYuchung Cheng 
3016783237e8SYuchung Cheng 	for (i = 0; i < iovlen && syn_data->len < space; ++i) {
3017783237e8SYuchung Cheng 		struct iovec *iov = &fo->data->msg_iov[i];
3018783237e8SYuchung Cheng 		unsigned char __user *from = iov->iov_base;
3019783237e8SYuchung Cheng 		int len = iov->iov_len;
3020783237e8SYuchung Cheng 
3021783237e8SYuchung Cheng 		if (syn_data->len + len > space)
3022783237e8SYuchung Cheng 			len = space - syn_data->len;
3023783237e8SYuchung Cheng 		else if (i + 1 == iovlen)
3024783237e8SYuchung Cheng 			/* No more data pending in inet_wait_for_connect() */
3025783237e8SYuchung Cheng 			fo->data = NULL;
3026783237e8SYuchung Cheng 
3027783237e8SYuchung Cheng 		if (skb_add_data(syn_data, from, len))
3028783237e8SYuchung Cheng 			goto fallback;
3029783237e8SYuchung Cheng 	}
3030783237e8SYuchung Cheng 
3031783237e8SYuchung Cheng 	/* Queue a data-only packet after the regular SYN for retransmission */
3032783237e8SYuchung Cheng 	data = pskb_copy(syn_data, sk->sk_allocation);
3033783237e8SYuchung Cheng 	if (data == NULL)
3034783237e8SYuchung Cheng 		goto fallback;
3035783237e8SYuchung Cheng 	TCP_SKB_CB(data)->seq++;
3036783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
3037783237e8SYuchung Cheng 	TCP_SKB_CB(data)->tcp_flags |= (TCPHDR_ACK|TCPHDR_PSH);
3038783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, data);
3039783237e8SYuchung Cheng 	fo->copied = data->len;
3040783237e8SYuchung Cheng 
3041431a9124SEric Dumazet 	/* syn_data is about to be sent; take current timestamps for the
3042431a9124SEric Dumazet 	 * packets in the write queue: the SYN packet and the DATA packet.
3043431a9124SEric Dumazet 	 */
3044431a9124SEric Dumazet 	skb_mstamp_get(&syn->skb_mstamp);
3045431a9124SEric Dumazet 	data->skb_mstamp = syn->skb_mstamp;
3046431a9124SEric Dumazet 
3047783237e8SYuchung Cheng 	if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
304867da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
3049f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3050783237e8SYuchung Cheng 		goto done;
3051783237e8SYuchung Cheng 	}
3052783237e8SYuchung Cheng 	syn_data = NULL;
3053783237e8SYuchung Cheng 
3054783237e8SYuchung Cheng fallback:
3055783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3056783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3057783237e8SYuchung Cheng 		fo->cookie.len = 0;
3058783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3059783237e8SYuchung Cheng 	if (err)
3060783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3061783237e8SYuchung Cheng 	kfree_skb(syn_data);
3062783237e8SYuchung Cheng done:
3063783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3064783237e8SYuchung Cheng 	return err;
3065783237e8SYuchung Cheng }
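/* A hedged userspace sketch (not part of tcp_output.c) of what drives the
 * Fast Open client path above: sendto() with MSG_FASTOPEN on an
 * unconnected TCP socket performs the implicit connect and hands the
 * payload to tcp_send_syn_data() via tp->fastopen_req. Assumes a kernel
 * with client-side Fast Open enabled in net.ipv4.tcp_fastopen; the
 * function name below is hypothetical and error handling is trimmed.
 */
#if 0	/* illustrative example only, never compiled */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int tfo_send(const struct sockaddr_in *dst,
		    const void *buf, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* Implicit connect: the data rides in the SYN when a cookie is
	 * cached; otherwise the kernel falls back to a cookie-request SYN
	 * and retransmits the data in the first ACK, as described above.
	 */
	n = sendto(fd, buf, len, MSG_FASTOPEN,
		   (const struct sockaddr *)dst, sizeof(*dst));
	if (n < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif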
3066783237e8SYuchung Cheng 
306767edfef7SAndi Kleen /* Build a SYN and send it off. */
30681da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
30691da177e4SLinus Torvalds {
30701da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30711da177e4SLinus Torvalds 	struct sk_buff *buff;
3072ee586811SEric Paris 	int err;
30731da177e4SLinus Torvalds 
30741da177e4SLinus Torvalds 	tcp_connect_init(sk);
30751da177e4SLinus Torvalds 
30762b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
30772b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
30782b916477SAndrey Vagin 		return 0;
30792b916477SAndrey Vagin 	}
30802b916477SAndrey Vagin 
3081d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
30821da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
30831da177e4SLinus Torvalds 		return -ENOBUFS;
30841da177e4SLinus Torvalds 
30851da177e4SLinus Torvalds 	/* Reserve space for headers. */
30861da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
30871da177e4SLinus Torvalds 
3088a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3089783237e8SYuchung Cheng 	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
3090783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3091e870a8efSIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
30921da177e4SLinus Torvalds 
3093783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3094783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3095783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3096ee586811SEric Paris 	if (err == -ECONNREFUSED)
3097ee586811SEric Paris 		return err;
3098bd37a088SWei Yongjun 
3099bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3100bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
3101bd37a088SWei Yongjun 	 */
3102bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3103bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
310481cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
31051da177e4SLinus Torvalds 
31061da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
31073f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
31083f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
31091da177e4SLinus Torvalds 	return 0;
31101da177e4SLinus Torvalds }
31114bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
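/* A hedged userspace sketch (not part of tcp_output.c) of the tp->repair
 * early return above: under TCP_REPAIR (checkpoint/restore tooling such
 * as CRIU, CAP_NET_ADMIN required) connect() must not emit a SYN, so
 * tcp_connect() goes straight to tcp_finish_connect(). A full repair
 * sequence also restores queues and sequence numbers; that is omitted
 * here, and the helper name is hypothetical.
 */
#if 0	/* illustrative example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>	/* TCP_REPAIR */

static int repair_connect(int fd, const struct sockaddr_in *dst)
{
	int on = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)) < 0)
		return -1;
	/* No SYN goes out: tcp_connect() sees tp->repair and returns 0 */
	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}
#endif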
31121da177e4SLinus Torvalds 
31131da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
31141da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
31151da177e4SLinus Torvalds  * for details.
31161da177e4SLinus Torvalds  */
31171da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
31181da177e4SLinus Torvalds {
3119463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3120463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
31211da177e4SLinus Torvalds 	unsigned long timeout;
31221da177e4SLinus Torvalds 
31231da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3124463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
31251da177e4SLinus Torvalds 		int max_ato = HZ / 2;
31261da177e4SLinus Torvalds 
3127056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
3128056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
31291da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
31301da177e4SLinus Torvalds 
31311da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
31321da177e4SLinus Torvalds 
31331da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3134463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
31351da177e4SLinus Torvalds 		 * directly.
31361da177e4SLinus Torvalds 		 */
3137740b0f18SEric Dumazet 		if (tp->srtt_us) {
3138740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3139740b0f18SEric Dumazet 					TCP_DELACK_MIN);
31401da177e4SLinus Torvalds 
31411da177e4SLinus Torvalds 			if (rtt < max_ato)
31421da177e4SLinus Torvalds 				max_ato = rtt;
31431da177e4SLinus Torvalds 		}
31441da177e4SLinus Torvalds 
31451da177e4SLinus Torvalds 		ato = min(ato, max_ato);
31461da177e4SLinus Torvalds 	}
31471da177e4SLinus Torvalds 
31481da177e4SLinus Torvalds 	/* Stay within the limit we were given */
31491da177e4SLinus Torvalds 	timeout = jiffies + ato;
31501da177e4SLinus Torvalds 
31511da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already. */
3152463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
31531da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
31541da177e4SLinus Torvalds 		 * send ACK now.
31551da177e4SLinus Torvalds 		 */
3156463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3157463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
31581da177e4SLinus Torvalds 			tcp_send_ack(sk);
31591da177e4SLinus Torvalds 			return;
31601da177e4SLinus Torvalds 		}
31611da177e4SLinus Torvalds 
3162463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3163463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
31641da177e4SLinus Torvalds 	}
3165463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3166463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3167463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
31681da177e4SLinus Torvalds }
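/* A minimal sketch (not part of tcp_output.c) of the clamping above: the
 * delayed-ACK timeout is ato bounded by HZ/2 (or TCP_DELACK_MAX in
 * pingpong/pushed mode) and, when an RTT estimate exists, by the
 * smoothed RTT, never below TCP_DELACK_MIN. Unlike tp->srtt_us above,
 * srtt here is assumed to be already converted to jiffies.
 */
#if 0	/* illustrative example only, never compiled */
static int toy_delack_ato(int ato, bool pingpong, int srtt)
{
	int max_ato = pingpong ? TCP_DELACK_MAX : HZ / 2;

	if (srtt) {
		int rtt = max_t(int, srtt, TCP_DELACK_MIN);

		if (rtt < max_ato)
			max_ato = rtt;
	}
	return min(ato, max_ato);
}
#endif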
31691da177e4SLinus Torvalds 
31701da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
31711da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
31721da177e4SLinus Torvalds {
31731da177e4SLinus Torvalds 	struct sk_buff *buff;
31741da177e4SLinus Torvalds 
3175058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3176058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3177058dc334SIlpo Järvinen 		return;
3178058dc334SIlpo Järvinen 
31791da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
31801da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
31811da177e4SLinus Torvalds 	 * sock.
31821da177e4SLinus Torvalds 	 */
318399a1dec7SMel Gorman 	buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
31841da177e4SLinus Torvalds 	if (buff == NULL) {
3185463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3186463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
31873f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
31883f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
31891da177e4SLinus Torvalds 		return;
31901da177e4SLinus Torvalds 	}
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
31931da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3194a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
31951da177e4SLinus Torvalds 
31961da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
31971da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
319899a1dec7SMel Gorman 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
31991da177e4SLinus Torvalds }
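/* A hedged userspace sketch (not part of tcp_output.c): TCP_QUICKACK
 * temporarily disables the delayed-ACK machinery above so ACKs are sent
 * immediately via tcp_send_ack(). The option is not sticky; re-arm it
 * around latency-sensitive reads as needed. Helper name is hypothetical.
 */
#if 0	/* illustrative example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int force_quick_acks(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}
#endif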
32001da177e4SLinus Torvalds 
32011da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
32021da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
32031da177e4SLinus Torvalds  *
32041da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
32051da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
32061da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
32071da177e4SLinus Torvalds  *
32081da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
32091da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
32101da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
32111da177e4SLinus Torvalds  */
32121da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
32131da177e4SLinus Torvalds {
32141da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32151da177e4SLinus Torvalds 	struct sk_buff *skb;
32161da177e4SLinus Torvalds 
32171da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
321899a1dec7SMel Gorman 	skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
32191da177e4SLinus Torvalds 	if (skb == NULL)
32201da177e4SLinus Torvalds 		return -1;
32211da177e4SLinus Torvalds 
32221da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
32231da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32241da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
32251da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
32261da177e4SLinus Torvalds 	 * send it.
32271da177e4SLinus Torvalds 	 */
3228a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
32291da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
3230dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
32311da177e4SLinus Torvalds }
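/* A minimal sketch (not part of tcp_output.c) of the sequence choice
 * above, with illustrative numbers: with SND.UNA = 5000, an urgent-mode
 * probe uses SEG.SEQ = 5000 to carry the urgent pointer, while a plain
 * window probe uses 4999, an already-ACKed sequence that the peer must
 * answer with a fresh ACK carrying its current window.
 */
#if 0	/* illustrative example only, never compiled */
static u32 toy_probe_seq(u32 snd_una, int urgent)
{
	return snd_una - !urgent;	/* urgent: SND.UNA, else SND.UNA - 1 */
}
#endif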
32321da177e4SLinus Torvalds 
3233ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3234ee995283SPavel Emelyanov {
3235ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3236ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3237ee995283SPavel Emelyanov 		tcp_xmit_probe_skb(sk, 0);
3238ee995283SPavel Emelyanov 	}
3239ee995283SPavel Emelyanov }
3240ee995283SPavel Emelyanov 
324167edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
32421da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
32431da177e4SLinus Torvalds {
32441da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32451da177e4SLinus Torvalds 	struct sk_buff *skb;
32461da177e4SLinus Torvalds 
3247058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3248058dc334SIlpo Järvinen 		return -1;
3249058dc334SIlpo Järvinen 
3250fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
325190840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
32521da177e4SLinus Torvalds 		int err;
32530c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
325490840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
32551da177e4SLinus Torvalds 
32561da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
32571da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
32581da177e4SLinus Torvalds 
32591da177e4SLinus Torvalds 		/* We are probing the opening of a window
32601da177e4SLinus Torvalds 		 * but the window size is != 0; this must have been
32611da177e4SLinus Torvalds 		 * the result of sender-side SWS avoidance.
32621da177e4SLinus Torvalds 		 */
32631da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
32641da177e4SLinus Torvalds 		    skb->len > mss) {
32651da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
32664de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
32676cc55e09SOctavian Purdila 			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
32681da177e4SLinus Torvalds 				return -1;
32691da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
3270846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
32711da177e4SLinus Torvalds 
32724de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
32731da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
3274dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
327566f5fe62SIlpo Järvinen 		if (!err)
327666f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
32771da177e4SLinus Torvalds 		return err;
32781da177e4SLinus Torvalds 	} else {
327933f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
32804828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
32811da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
32821da177e4SLinus Torvalds 	}
32831da177e4SLinus Torvalds }
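/* A hedged userspace sketch (not part of tcp_output.c) of what arms the
 * keepalive side of tcp_write_wakeup(): SO_KEEPALIVE enables the timer,
 * and the Linux-specific TCP_KEEP* options tune the idle time before the
 * first probe, the probe interval, and the probe count. Values below are
 * illustrative and the helper name is hypothetical.
 */
#if 0	/* illustrative example only, never compiled */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
#endif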
32841da177e4SLinus Torvalds 
32851da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
32861da177e4SLinus Torvalds  * send a partial packet; otherwise send a zero-window probe.
32871da177e4SLinus Torvalds  */
32881da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
32891da177e4SLinus Torvalds {
3290463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
32911da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
32921da177e4SLinus Torvalds 	int err;
32931da177e4SLinus Torvalds 
32941da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
32951da177e4SLinus Torvalds 
3296fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
32971da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
32986687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3299463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
33001da177e4SLinus Torvalds 		return;
33011da177e4SLinus Torvalds 	}
33021da177e4SLinus Torvalds 
33031da177e4SLinus Torvalds 	if (err <= 0) {
3304463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
3305463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
33066687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
3307463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
33083f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
33093f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
33101da177e4SLinus Torvalds 	} else {
33111da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
33126687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
33131da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
33141da177e4SLinus Torvalds 		 *
33151da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
33161da177e4SLinus Torvalds 		 */
33176687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
33186687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
3319463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3320463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
33213f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
33223f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
33231da177e4SLinus Torvalds 	}
33241da177e4SLinus Torvalds }
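/* A minimal sketch (not part of tcp_output.c) of the backoff above: each
 * unanswered probe doubles the wait, capped at TCP_RTO_MAX. E.g. with
 * icsk_rto = HZ/5 (200ms): 200ms, 400ms, 800ms, ... until the shift
 * passes the cap, after which every further probe waits TCP_RTO_MAX.
 */
#if 0	/* illustrative example only, never compiled */
static unsigned long toy_probe0_timeout(unsigned long rto, u8 backoff)
{
	return min_t(unsigned long, rto << backoff, TCP_RTO_MAX);
}
#endif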
33255db92c99SOctavian Purdila 
33265db92c99SOctavian Purdila int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
33275db92c99SOctavian Purdila {
33285db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
33295db92c99SOctavian Purdila 	struct flowi fl;
33305db92c99SOctavian Purdila 	int res;
33315db92c99SOctavian Purdila 
33325db92c99SOctavian Purdila 	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
33335db92c99SOctavian Purdila 	if (!res) {
33345db92c99SOctavian Purdila 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
33355db92c99SOctavian Purdila 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
33365db92c99SOctavian Purdila 	}
33375db92c99SOctavian Purdila 	return res;
33385db92c99SOctavian Purdila }
33395db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3340