xref: /linux/net/ipv4/tcp_output.c (revision 28b2774a0d5852236dab77a4147b8b88548110f1)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
802c30a84SJesper Juhl  * Authors:	Ross Biro
91da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
101da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
111da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
121da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
131da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
141da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
151da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
161da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
171da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
181da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
191da177e4SLinus Torvalds  */
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds /*
221da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
231da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
241da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
251da177e4SLinus Torvalds  *				:	AF independence
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
281da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
291da177e4SLinus Torvalds  *					during syn/ack processing.
301da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
311da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
321da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
331da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
341da177e4SLinus Torvalds  *
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
371da177e4SLinus Torvalds #include <net/tcp.h>
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds #include <linux/compiler.h>
401da177e4SLinus Torvalds #include <linux/module.h>
411da177e4SLinus Torvalds 
421da177e4SLinus Torvalds /* People can turn this off for buggy TCP's found in printers etc. */
43ab32ea5dSBrian Haley int sysctl_tcp_retrans_collapse __read_mostly = 1;
441da177e4SLinus Torvalds 
4515d99e02SRick Jones /* People can turn this on to work with those rare, broken TCPs that
4615d99e02SRick Jones  * interpret the window field as a signed quantity.
4715d99e02SRick Jones  */
48ab32ea5dSBrian Haley int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
4915d99e02SRick Jones 
501da177e4SLinus Torvalds /* This limits the percentage of the congestion window which we
511da177e4SLinus Torvalds  * will allow a single TSO frame to consume.  Building TSO frames
521da177e4SLinus Torvalds  * which are too large can cause TCP streams to be bursty.
531da177e4SLinus Torvalds  */
54ab32ea5dSBrian Haley int sysctl_tcp_tso_win_divisor __read_mostly = 3;
551da177e4SLinus Torvalds 
56ab32ea5dSBrian Haley int sysctl_tcp_mtu_probing __read_mostly = 0;
57ab32ea5dSBrian Haley int sysctl_tcp_base_mss __read_mostly = 512;
585d424d5aSJohn Heffner 
5935089bb2SDavid S. Miller /* By default, RFC2861 behavior.  */
60ab32ea5dSBrian Haley int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
6135089bb2SDavid S. Miller 
62519855c5SWilliam Allen Simpson int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
63e6b09ccaSDavid S. Miller EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
64519855c5SWilliam Allen Simpson 
65519855c5SWilliam Allen Simpson 
6667edfef7SAndi Kleen /* Account for new data that has been sent to the network. */
6766f5fe62SIlpo Järvinen static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
686ff03ac3SIlpo Järvinen {
696ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
7066f5fe62SIlpo Järvinen 	unsigned int prior_packets = tp->packets_out;
719e412ba7SIlpo Järvinen 
72fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
731da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
748512430eSIlpo Järvinen 
758512430eSIlpo Järvinen 	/* Don't override Nagle indefinately with F-RTO */
768512430eSIlpo Järvinen 	if (tp->frto_counter == 2)
778512430eSIlpo Järvinen 		tp->frto_counter = 3;
7866f5fe62SIlpo Järvinen 
7966f5fe62SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
8066f5fe62SIlpo Järvinen 	if (!prior_packets)
8166f5fe62SIlpo Järvinen 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
8266f5fe62SIlpo Järvinen 					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
831da177e4SLinus Torvalds }
841da177e4SLinus Torvalds 
851da177e4SLinus Torvalds /* SND.NXT, if window was not shrunk.
861da177e4SLinus Torvalds  * If window has been shrunk, what should we make? It is not clear at all.
871da177e4SLinus Torvalds  * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
881da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
891da177e4SLinus Torvalds  * invalid. OK, let's make this for now:
901da177e4SLinus Torvalds  */
919e412ba7SIlpo Järvinen static inline __u32 tcp_acceptable_seq(struct sock *sk)
921da177e4SLinus Torvalds {
939e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
949e412ba7SIlpo Järvinen 
9590840defSIlpo Järvinen 	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
961da177e4SLinus Torvalds 		return tp->snd_nxt;
971da177e4SLinus Torvalds 	else
9890840defSIlpo Järvinen 		return tcp_wnd_end(tp);
991da177e4SLinus Torvalds }
1001da177e4SLinus Torvalds 
1011da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1021da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1031da177e4SLinus Torvalds  *
1041da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1051da177e4SLinus Torvalds  * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
1061da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1071da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1081da177e4SLinus Torvalds  *    large MSS.
1091da177e4SLinus Torvalds  * 4. We do not make 3, we advertise MSS, calculated from first
1101da177e4SLinus Torvalds  *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
1111da177e4SLinus Torvalds  *    This may be overridden via information stored in routing table.
1121da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1131da177e4SLinus Torvalds  *    probably even Jumbo".
1141da177e4SLinus Torvalds  */
1151da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1161da177e4SLinus Torvalds {
1171da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1181da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
1191da177e4SLinus Torvalds 	int mss = tp->advmss;
1201da177e4SLinus Torvalds 
1211da177e4SLinus Torvalds 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
1221da177e4SLinus Torvalds 		mss = dst_metric(dst, RTAX_ADVMSS);
1231da177e4SLinus Torvalds 		tp->advmss = mss;
1241da177e4SLinus Torvalds 	}
1251da177e4SLinus Torvalds 
1261da177e4SLinus Torvalds 	return (__u16)mss;
1271da177e4SLinus Torvalds }
1281da177e4SLinus Torvalds 
1291da177e4SLinus Torvalds /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
1301da177e4SLinus Torvalds  * This is the first part of cwnd validation mechanism. */
131463c84b9SArnaldo Carvalho de Melo static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
1321da177e4SLinus Torvalds {
133463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1341da177e4SLinus Torvalds 	s32 delta = tcp_time_stamp - tp->lsndtime;
1351da177e4SLinus Torvalds 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
1361da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1371da177e4SLinus Torvalds 
1386687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1391da177e4SLinus Torvalds 
1406687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1411da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1421da177e4SLinus Torvalds 
143463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1441da177e4SLinus Torvalds 		cwnd >>= 1;
1451da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
1461da177e4SLinus Torvalds 	tp->snd_cwnd_stamp = tcp_time_stamp;
1471da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1481da177e4SLinus Torvalds }
1491da177e4SLinus Torvalds 
15067edfef7SAndi Kleen /* Congestion state accounting after a packet has been sent. */
15140efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
1521da177e4SLinus Torvalds 				struct sk_buff *skb, struct sock *sk)
1531da177e4SLinus Torvalds {
154463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
155463c84b9SArnaldo Carvalho de Melo 	const u32 now = tcp_time_stamp;
1561da177e4SLinus Torvalds 
15735089bb2SDavid S. Miller 	if (sysctl_tcp_slow_start_after_idle &&
15835089bb2SDavid S. Miller 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
159463c84b9SArnaldo Carvalho de Melo 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
1601da177e4SLinus Torvalds 
1611da177e4SLinus Torvalds 	tp->lsndtime = now;
1621da177e4SLinus Torvalds 
1631da177e4SLinus Torvalds 	/* If it is a reply for ato after last received
1641da177e4SLinus Torvalds 	 * packet, enter pingpong mode.
1651da177e4SLinus Torvalds 	 */
166463c84b9SArnaldo Carvalho de Melo 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
167463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1681da177e4SLinus Torvalds }
1691da177e4SLinus Torvalds 
17067edfef7SAndi Kleen /* Account for an ACK we sent. */
17140efc6faSStephen Hemminger static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
1721da177e4SLinus Torvalds {
173463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
174463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1751da177e4SLinus Torvalds }
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1781da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1791da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
1801da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
1811da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
1821da177e4SLinus Torvalds  * This MUST be enforced by all callers.
1831da177e4SLinus Torvalds  */
1841da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
1851da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
18631d12926Slaurent chavey 			       int wscale_ok, __u8 *rcv_wscale,
18731d12926Slaurent chavey 			       __u32 init_rcv_wnd)
1881da177e4SLinus Torvalds {
1891da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
1901da177e4SLinus Torvalds 
1911da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
1921da177e4SLinus Torvalds 	if (*window_clamp == 0)
1931da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
1941da177e4SLinus Torvalds 	space = min(*window_clamp, space);
1951da177e4SLinus Torvalds 
1961da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
1971da177e4SLinus Torvalds 	if (space > mss)
1981da177e4SLinus Torvalds 		space = (space / mss) * mss;
1991da177e4SLinus Torvalds 
2001da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
20115d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
20215d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
20315d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
20415d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
20515d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
20615d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2071da177e4SLinus Torvalds 	 */
20815d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
2091da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
21015d99e02SRick Jones 	else
21115d99e02SRick Jones 		(*rcv_wnd) = space;
21215d99e02SRick Jones 
2131da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2141da177e4SLinus Torvalds 	if (wscale_ok) {
2151da177e4SLinus Torvalds 		/* Set window scaling on max possible window
2161da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2171da177e4SLinus Torvalds 		 */
2181da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
219316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2201da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2211da177e4SLinus Torvalds 			space >>= 1;
2221da177e4SLinus Torvalds 			(*rcv_wscale)++;
2231da177e4SLinus Torvalds 		}
2241da177e4SLinus Torvalds 	}
2251da177e4SLinus Torvalds 
2261da177e4SLinus Torvalds 	/* Set initial window to value enough for senders,
2276b251858SDavid S. Miller 	 * following RFC2414. Senders, not following this RFC,
2281da177e4SLinus Torvalds 	 * will be satisfied with 2.
2291da177e4SLinus Torvalds 	 */
2301da177e4SLinus Torvalds 	if (mss > (1 << *rcv_wscale)) {
23101ff367eSDavid S. Miller 		int init_cwnd = 4;
23201ff367eSDavid S. Miller 		if (mss > 1460 * 3)
2331da177e4SLinus Torvalds 			init_cwnd = 2;
23401ff367eSDavid S. Miller 		else if (mss > 1460)
23501ff367eSDavid S. Miller 			init_cwnd = 3;
23631d12926Slaurent chavey 		/* when initializing use the value from init_rcv_wnd
23731d12926Slaurent chavey 		 * rather than the default from above
23831d12926Slaurent chavey 		 */
23931d12926Slaurent chavey 		if (init_rcv_wnd &&
24031d12926Slaurent chavey 		    (*rcv_wnd > init_rcv_wnd * mss))
24131d12926Slaurent chavey 			*rcv_wnd = init_rcv_wnd * mss;
24231d12926Slaurent chavey 		else if (*rcv_wnd > init_cwnd * mss)
2431da177e4SLinus Torvalds 			*rcv_wnd = init_cwnd * mss;
2441da177e4SLinus Torvalds 	}
2451da177e4SLinus Torvalds 
2461da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2471da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2481da177e4SLinus Torvalds }
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds /* Chose a new window to advertise, update state in tcp_sock for the
2511da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2521da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2531da177e4SLinus Torvalds  * frame.
2541da177e4SLinus Torvalds  */
25540efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2561da177e4SLinus Torvalds {
2571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2581da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2591da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2601da177e4SLinus Torvalds 
2611da177e4SLinus Torvalds 	/* Never shrink the offered window */
2621da177e4SLinus Torvalds 	if (new_win < cur_win) {
2631da177e4SLinus Torvalds 		/* Danger Will Robinson!
2641da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2651da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2661da177e4SLinus Torvalds 		 * window in time.  --DaveM
2671da177e4SLinus Torvalds 		 *
2681da177e4SLinus Torvalds 		 * Relax Will Robinson.
2691da177e4SLinus Torvalds 		 */
270607bfbf2SPatrick McHardy 		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
2711da177e4SLinus Torvalds 	}
2721da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2731da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2741da177e4SLinus Torvalds 
2751da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2761da177e4SLinus Torvalds 	 * scaled window.
2771da177e4SLinus Torvalds 	 */
27815d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2791da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2801da177e4SLinus Torvalds 	else
2811da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2821da177e4SLinus Torvalds 
2831da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2841da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2851da177e4SLinus Torvalds 
2861da177e4SLinus Torvalds 	/* If we advertise zero window, disable fast path. */
2871da177e4SLinus Torvalds 	if (new_win == 0)
2881da177e4SLinus Torvalds 		tp->pred_flags = 0;
2891da177e4SLinus Torvalds 
2901da177e4SLinus Torvalds 	return new_win;
2911da177e4SLinus Torvalds }
2921da177e4SLinus Torvalds 
29367edfef7SAndi Kleen /* Packet ECN state for a SYN-ACK */
294056834d9SIlpo Järvinen static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
295bdf1ee5dSIlpo Järvinen {
296bdf1ee5dSIlpo Järvinen 	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
297bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags & TCP_ECN_OK))
298bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
299bdf1ee5dSIlpo Järvinen }
300bdf1ee5dSIlpo Järvinen 
30167edfef7SAndi Kleen /* Packet ECN state for a SYN.  */
302bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
303bdf1ee5dSIlpo Järvinen {
304bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
305bdf1ee5dSIlpo Järvinen 
306bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
307255cac91SIlpo Järvinen 	if (sysctl_tcp_ecn == 1) {
308bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
309bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
310bdf1ee5dSIlpo Järvinen 	}
311bdf1ee5dSIlpo Järvinen }
312bdf1ee5dSIlpo Järvinen 
313bdf1ee5dSIlpo Järvinen static __inline__ void
314bdf1ee5dSIlpo Järvinen TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
315bdf1ee5dSIlpo Järvinen {
316bdf1ee5dSIlpo Järvinen 	if (inet_rsk(req)->ecn_ok)
317bdf1ee5dSIlpo Järvinen 		th->ece = 1;
318bdf1ee5dSIlpo Järvinen }
319bdf1ee5dSIlpo Järvinen 
32067edfef7SAndi Kleen /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
32167edfef7SAndi Kleen  * be sent.
32267edfef7SAndi Kleen  */
323bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
324bdf1ee5dSIlpo Järvinen 				int tcp_header_len)
325bdf1ee5dSIlpo Järvinen {
326bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
327bdf1ee5dSIlpo Järvinen 
328bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
329bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
330bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
331bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
332bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
333bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
334bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
335bdf1ee5dSIlpo Järvinen 				tcp_hdr(skb)->cwr = 1;
336bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
337bdf1ee5dSIlpo Järvinen 			}
338bdf1ee5dSIlpo Järvinen 		} else {
339bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
340bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
341bdf1ee5dSIlpo Järvinen 		}
342bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
343bdf1ee5dSIlpo Järvinen 			tcp_hdr(skb)->ece = 1;
344bdf1ee5dSIlpo Järvinen 	}
345bdf1ee5dSIlpo Järvinen }
346bdf1ee5dSIlpo Järvinen 
347e870a8efSIlpo Järvinen /* Constructs common control bits of non-data skb. If SYN/FIN is present,
348e870a8efSIlpo Järvinen  * auto increment end seqno.
349e870a8efSIlpo Järvinen  */
350e870a8efSIlpo Järvinen static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
351e870a8efSIlpo Järvinen {
352e870a8efSIlpo Järvinen 	skb->csum = 0;
353e870a8efSIlpo Järvinen 
354e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->flags = flags;
355e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->sacked = 0;
356e870a8efSIlpo Järvinen 
357e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_segs = 1;
358e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_size = 0;
359e870a8efSIlpo Järvinen 	skb_shinfo(skb)->gso_type = 0;
360e870a8efSIlpo Järvinen 
361e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->seq = seq;
362e870a8efSIlpo Järvinen 	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
363e870a8efSIlpo Järvinen 		seq++;
364e870a8efSIlpo Järvinen 	TCP_SKB_CB(skb)->end_seq = seq;
365e870a8efSIlpo Järvinen }
366e870a8efSIlpo Järvinen 
36733f5f57eSIlpo Järvinen static inline int tcp_urg_mode(const struct tcp_sock *tp)
36833f5f57eSIlpo Järvinen {
36933f5f57eSIlpo Järvinen 	return tp->snd_una != tp->snd_up;
37033f5f57eSIlpo Järvinen }
37133f5f57eSIlpo Järvinen 
37233ad798cSAdam Langley #define OPTION_SACK_ADVERTISE	(1 << 0)
37333ad798cSAdam Langley #define OPTION_TS		(1 << 1)
37433ad798cSAdam Langley #define OPTION_MD5		(1 << 2)
37589e95a61SOri Finkelman #define OPTION_WSCALE		(1 << 3)
376bd0388aeSWilliam Allen Simpson #define OPTION_COOKIE_EXTENSION	(1 << 4)
37733ad798cSAdam Langley 
37833ad798cSAdam Langley struct tcp_out_options {
37933ad798cSAdam Langley 	u8 options;		/* bit field of OPTION_* */
38033ad798cSAdam Langley 	u8 ws;			/* window scale, 0 to disable */
38133ad798cSAdam Langley 	u8 num_sack_blocks;	/* number of SACK blocks to include */
382bd0388aeSWilliam Allen Simpson 	u8 hash_size;		/* bytes in hash_location */
38333ad798cSAdam Langley 	u16 mss;		/* 0 to disable */
38433ad798cSAdam Langley 	__u32 tsval, tsecr;	/* need to include OPTION_TS */
385bd0388aeSWilliam Allen Simpson 	__u8 *hash_location;	/* temporary pointer, overloaded */
38633ad798cSAdam Langley };
38733ad798cSAdam Langley 
388bd0388aeSWilliam Allen Simpson /* The sysctl int routines are generic, so check consistency here.
389bd0388aeSWilliam Allen Simpson  */
390bd0388aeSWilliam Allen Simpson static u8 tcp_cookie_size_check(u8 desired)
391bd0388aeSWilliam Allen Simpson {
392bd0388aeSWilliam Allen Simpson 	if (desired > 0) {
393bd0388aeSWilliam Allen Simpson 		/* previously specified */
394bd0388aeSWilliam Allen Simpson 		return desired;
395bd0388aeSWilliam Allen Simpson 	}
396bd0388aeSWilliam Allen Simpson 	if (sysctl_tcp_cookie_size <= 0) {
397bd0388aeSWilliam Allen Simpson 		/* no default specified */
398bd0388aeSWilliam Allen Simpson 		return 0;
399bd0388aeSWilliam Allen Simpson 	}
400bd0388aeSWilliam Allen Simpson 	if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
401bd0388aeSWilliam Allen Simpson 		/* value too small, specify minimum */
402bd0388aeSWilliam Allen Simpson 		return TCP_COOKIE_MIN;
403bd0388aeSWilliam Allen Simpson 	}
404bd0388aeSWilliam Allen Simpson 	if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
405bd0388aeSWilliam Allen Simpson 		/* value too large, specify maximum */
406bd0388aeSWilliam Allen Simpson 		return TCP_COOKIE_MAX;
407bd0388aeSWilliam Allen Simpson 	}
408bd0388aeSWilliam Allen Simpson 	if (0x1 & sysctl_tcp_cookie_size) {
409bd0388aeSWilliam Allen Simpson 		/* 8-bit multiple, illegal, fix it */
410bd0388aeSWilliam Allen Simpson 		return (u8)(sysctl_tcp_cookie_size + 0x1);
411bd0388aeSWilliam Allen Simpson 	}
412bd0388aeSWilliam Allen Simpson 	return (u8)sysctl_tcp_cookie_size;
413bd0388aeSWilliam Allen Simpson }
414bd0388aeSWilliam Allen Simpson 
41567edfef7SAndi Kleen /* Write previously computed TCP options to the packet.
41667edfef7SAndi Kleen  *
41767edfef7SAndi Kleen  * Beware: Something in the Internet is very sensitive to the ordering of
418fd6149d3SIlpo Järvinen  * TCP options, we learned this through the hard way, so be careful here.
419fd6149d3SIlpo Järvinen  * Luckily we can at least blame others for their non-compliance but from
420fd6149d3SIlpo Järvinen  * inter-operatibility perspective it seems that we're somewhat stuck with
421fd6149d3SIlpo Järvinen  * the ordering which we have been using if we want to keep working with
422fd6149d3SIlpo Järvinen  * those broken things (not that it currently hurts anybody as there isn't
423fd6149d3SIlpo Järvinen  * particular reason why the ordering would need to be changed).
424fd6149d3SIlpo Järvinen  *
425fd6149d3SIlpo Järvinen  * At least SACK_PERM as the first option is known to lead to a disaster
426fd6149d3SIlpo Järvinen  * (but it may well be that other scenarios fail similarly).
427fd6149d3SIlpo Järvinen  */
42833ad798cSAdam Langley static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
429bd0388aeSWilliam Allen Simpson 			      struct tcp_out_options *opts)
430bd0388aeSWilliam Allen Simpson {
431bd0388aeSWilliam Allen Simpson 	u8 options = opts->options;	/* mungable copy */
432bd0388aeSWilliam Allen Simpson 
433bd0388aeSWilliam Allen Simpson 	/* Having both authentication and cookies for security is redundant,
434bd0388aeSWilliam Allen Simpson 	 * and there's certainly not enough room.  Instead, the cookie-less
435bd0388aeSWilliam Allen Simpson 	 * extension variant is proposed.
436bd0388aeSWilliam Allen Simpson 	 *
437bd0388aeSWilliam Allen Simpson 	 * Consider the pessimal case with authentication.  The options
438bd0388aeSWilliam Allen Simpson 	 * could look like:
439bd0388aeSWilliam Allen Simpson 	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
440bd0388aeSWilliam Allen Simpson 	 */
441bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_MD5 & options)) {
442bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
443bd0388aeSWilliam Allen Simpson 			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
444bd0388aeSWilliam Allen Simpson 				       (TCPOLEN_COOKIE_BASE << 16) |
445bd0388aeSWilliam Allen Simpson 				       (TCPOPT_MD5SIG << 8) |
446bd0388aeSWilliam Allen Simpson 				       TCPOLEN_MD5SIG);
447bd0388aeSWilliam Allen Simpson 		} else {
44833ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_NOP << 24) |
44933ad798cSAdam Langley 				       (TCPOPT_NOP << 16) |
45033ad798cSAdam Langley 				       (TCPOPT_MD5SIG << 8) |
45133ad798cSAdam Langley 				       TCPOLEN_MD5SIG);
452bd0388aeSWilliam Allen Simpson 		}
453bd0388aeSWilliam Allen Simpson 		options &= ~OPTION_COOKIE_EXTENSION;
454bd0388aeSWilliam Allen Simpson 		/* overload cookie hash location */
455bd0388aeSWilliam Allen Simpson 		opts->hash_location = (__u8 *)ptr;
45633ad798cSAdam Langley 		ptr += 4;
45733ad798cSAdam Langley 	}
45833ad798cSAdam Langley 
459fd6149d3SIlpo Järvinen 	if (unlikely(opts->mss)) {
460fd6149d3SIlpo Järvinen 		*ptr++ = htonl((TCPOPT_MSS << 24) |
461fd6149d3SIlpo Järvinen 			       (TCPOLEN_MSS << 16) |
462fd6149d3SIlpo Järvinen 			       opts->mss);
463fd6149d3SIlpo Järvinen 	}
464fd6149d3SIlpo Järvinen 
465bd0388aeSWilliam Allen Simpson 	if (likely(OPTION_TS & options)) {
466bd0388aeSWilliam Allen Simpson 		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
46733ad798cSAdam Langley 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
46833ad798cSAdam Langley 				       (TCPOLEN_SACK_PERM << 16) |
46933ad798cSAdam Langley 				       (TCPOPT_TIMESTAMP << 8) |
47033ad798cSAdam Langley 				       TCPOLEN_TIMESTAMP);
471bd0388aeSWilliam Allen Simpson 			options &= ~OPTION_SACK_ADVERTISE;
47233ad798cSAdam Langley 		} else {
473496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
47440efc6faSStephen Hemminger 				       (TCPOPT_NOP << 16) |
47540efc6faSStephen Hemminger 				       (TCPOPT_TIMESTAMP << 8) |
47640efc6faSStephen Hemminger 				       TCPOLEN_TIMESTAMP);
47740efc6faSStephen Hemminger 		}
47833ad798cSAdam Langley 		*ptr++ = htonl(opts->tsval);
47933ad798cSAdam Langley 		*ptr++ = htonl(opts->tsecr);
48033ad798cSAdam Langley 	}
48133ad798cSAdam Langley 
482bd0388aeSWilliam Allen Simpson 	/* Specification requires after timestamp, so do it now.
483bd0388aeSWilliam Allen Simpson 	 *
484bd0388aeSWilliam Allen Simpson 	 * Consider the pessimal case without authentication.  The options
485bd0388aeSWilliam Allen Simpson 	 * could look like:
486bd0388aeSWilliam Allen Simpson 	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
487bd0388aeSWilliam Allen Simpson 	 */
488bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
489bd0388aeSWilliam Allen Simpson 		__u8 *cookie_copy = opts->hash_location;
490bd0388aeSWilliam Allen Simpson 		u8 cookie_size = opts->hash_size;
491bd0388aeSWilliam Allen Simpson 
492bd0388aeSWilliam Allen Simpson 		/* 8-bit multiple handled in tcp_cookie_size_check() above,
493bd0388aeSWilliam Allen Simpson 		 * and elsewhere.
494bd0388aeSWilliam Allen Simpson 		 */
495bd0388aeSWilliam Allen Simpson 		if (0x2 & cookie_size) {
496bd0388aeSWilliam Allen Simpson 			__u8 *p = (__u8 *)ptr;
497bd0388aeSWilliam Allen Simpson 
498bd0388aeSWilliam Allen Simpson 			/* 16-bit multiple */
499bd0388aeSWilliam Allen Simpson 			*p++ = TCPOPT_COOKIE;
500bd0388aeSWilliam Allen Simpson 			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
501bd0388aeSWilliam Allen Simpson 			*p++ = *cookie_copy++;
502bd0388aeSWilliam Allen Simpson 			*p++ = *cookie_copy++;
503bd0388aeSWilliam Allen Simpson 			ptr++;
504bd0388aeSWilliam Allen Simpson 			cookie_size -= 2;
505bd0388aeSWilliam Allen Simpson 		} else {
506bd0388aeSWilliam Allen Simpson 			/* 32-bit multiple */
507bd0388aeSWilliam Allen Simpson 			*ptr++ = htonl(((TCPOPT_NOP << 24) |
508bd0388aeSWilliam Allen Simpson 					(TCPOPT_NOP << 16) |
509bd0388aeSWilliam Allen Simpson 					(TCPOPT_COOKIE << 8) |
510bd0388aeSWilliam Allen Simpson 					TCPOLEN_COOKIE_BASE) +
511bd0388aeSWilliam Allen Simpson 				       cookie_size);
512bd0388aeSWilliam Allen Simpson 		}
513bd0388aeSWilliam Allen Simpson 
514bd0388aeSWilliam Allen Simpson 		if (cookie_size > 0) {
515bd0388aeSWilliam Allen Simpson 			memcpy(ptr, cookie_copy, cookie_size);
516bd0388aeSWilliam Allen Simpson 			ptr += (cookie_size / 4);
517bd0388aeSWilliam Allen Simpson 		}
518bd0388aeSWilliam Allen Simpson 	}
519bd0388aeSWilliam Allen Simpson 
520bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
52133ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
52233ad798cSAdam Langley 			       (TCPOPT_NOP << 16) |
52333ad798cSAdam Langley 			       (TCPOPT_SACK_PERM << 8) |
52433ad798cSAdam Langley 			       TCPOLEN_SACK_PERM);
52533ad798cSAdam Langley 	}
52633ad798cSAdam Langley 
527bd0388aeSWilliam Allen Simpson 	if (unlikely(OPTION_WSCALE & options)) {
52833ad798cSAdam Langley 		*ptr++ = htonl((TCPOPT_NOP << 24) |
52933ad798cSAdam Langley 			       (TCPOPT_WINDOW << 16) |
53033ad798cSAdam Langley 			       (TCPOLEN_WINDOW << 8) |
53133ad798cSAdam Langley 			       opts->ws);
53233ad798cSAdam Langley 	}
53333ad798cSAdam Langley 
53433ad798cSAdam Langley 	if (unlikely(opts->num_sack_blocks)) {
53533ad798cSAdam Langley 		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
53633ad798cSAdam Langley 			tp->duplicate_sack : tp->selective_acks;
53740efc6faSStephen Hemminger 		int this_sack;
53840efc6faSStephen Hemminger 
53940efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
54040efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
54140efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
54233ad798cSAdam Langley 			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
54340efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
5442de979bdSStephen Hemminger 
54533ad798cSAdam Langley 		for (this_sack = 0; this_sack < opts->num_sack_blocks;
54633ad798cSAdam Langley 		     ++this_sack) {
54740efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
54840efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
54940efc6faSStephen Hemminger 		}
5502de979bdSStephen Hemminger 
55140efc6faSStephen Hemminger 		tp->rx_opt.dsack = 0;
55240efc6faSStephen Hemminger 	}
55340efc6faSStephen Hemminger }
55440efc6faSStephen Hemminger 
55567edfef7SAndi Kleen /* Compute TCP options for SYN packets. This is not the final
55667edfef7SAndi Kleen  * network wire format yet.
55767edfef7SAndi Kleen  */
55833ad798cSAdam Langley static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
55933ad798cSAdam Langley 				struct tcp_out_options *opts,
56033ad798cSAdam Langley 				struct tcp_md5sig_key **md5) {
56133ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
562bd0388aeSWilliam Allen Simpson 	struct tcp_cookie_values *cvp = tp->cookie_values;
563bd0388aeSWilliam Allen Simpson 	unsigned remaining = MAX_TCP_OPTION_SPACE;
564bd0388aeSWilliam Allen Simpson 	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
565bd0388aeSWilliam Allen Simpson 			 tcp_cookie_size_check(cvp->cookie_desired) :
566bd0388aeSWilliam Allen Simpson 			 0;
56733ad798cSAdam Langley 
568cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
56933ad798cSAdam Langley 	*md5 = tp->af_specific->md5_lookup(sk, sk);
57033ad798cSAdam Langley 	if (*md5) {
57133ad798cSAdam Langley 		opts->options |= OPTION_MD5;
572bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
573cfb6eeb4SYOSHIFUJI Hideaki 	}
57433ad798cSAdam Langley #else
57533ad798cSAdam Langley 	*md5 = NULL;
576cfb6eeb4SYOSHIFUJI Hideaki #endif
57733ad798cSAdam Langley 
57833ad798cSAdam Langley 	/* We always get an MSS option.  The option bytes which will be seen in
57933ad798cSAdam Langley 	 * normal data packets should timestamps be used, must be in the MSS
58033ad798cSAdam Langley 	 * advertised.  But we subtract them from tp->mss_cache so that
58133ad798cSAdam Langley 	 * calculations in tcp_sendmsg are simpler etc.  So account for this
58233ad798cSAdam Langley 	 * fact here if necessary.  If we don't do this correctly, as a
58333ad798cSAdam Langley 	 * receiver we won't recognize data packets as being full sized when we
58433ad798cSAdam Langley 	 * should, and thus we won't abide by the delayed ACK rules correctly.
58533ad798cSAdam Langley 	 * SACKs don't matter, we never delay an ACK when we have any of those
58633ad798cSAdam Langley 	 * going out.  */
58733ad798cSAdam Langley 	opts->mss = tcp_advertise_mss(sk);
588bd0388aeSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
58933ad798cSAdam Langley 
590bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
59133ad798cSAdam Langley 		opts->options |= OPTION_TS;
59233ad798cSAdam Langley 		opts->tsval = TCP_SKB_CB(skb)->when;
59333ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
594bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
59533ad798cSAdam Langley 	}
596bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_window_scaling)) {
59733ad798cSAdam Langley 		opts->ws = tp->rx_opt.rcv_wscale;
59889e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
599bd0388aeSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
60033ad798cSAdam Langley 	}
601bb5b7c11SDavid S. Miller 	if (likely(sysctl_tcp_sack)) {
60233ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
603b32d1310SDavid S. Miller 		if (unlikely(!(OPTION_TS & opts->options)))
604bd0388aeSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
60533ad798cSAdam Langley 	}
60633ad798cSAdam Langley 
607bd0388aeSWilliam Allen Simpson 	/* Note that timestamps are required by the specification.
608bd0388aeSWilliam Allen Simpson 	 *
609bd0388aeSWilliam Allen Simpson 	 * Odd numbers of bytes are prohibited by the specification, ensuring
610bd0388aeSWilliam Allen Simpson 	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
611bd0388aeSWilliam Allen Simpson 	 * 32-bit aligned.
612bd0388aeSWilliam Allen Simpson 	 */
613bd0388aeSWilliam Allen Simpson 	if (*md5 == NULL &&
614bd0388aeSWilliam Allen Simpson 	    (OPTION_TS & opts->options) &&
615bd0388aeSWilliam Allen Simpson 	    cookie_size > 0) {
616bd0388aeSWilliam Allen Simpson 		int need = TCPOLEN_COOKIE_BASE + cookie_size;
617bd0388aeSWilliam Allen Simpson 
618bd0388aeSWilliam Allen Simpson 		if (0x2 & need) {
619bd0388aeSWilliam Allen Simpson 			/* 32-bit multiple */
620bd0388aeSWilliam Allen Simpson 			need += 2; /* NOPs */
621bd0388aeSWilliam Allen Simpson 
622bd0388aeSWilliam Allen Simpson 			if (need > remaining) {
623bd0388aeSWilliam Allen Simpson 				/* try shrinking cookie to fit */
624bd0388aeSWilliam Allen Simpson 				cookie_size -= 2;
625bd0388aeSWilliam Allen Simpson 				need -= 4;
626bd0388aeSWilliam Allen Simpson 			}
627bd0388aeSWilliam Allen Simpson 		}
628bd0388aeSWilliam Allen Simpson 		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
629bd0388aeSWilliam Allen Simpson 			cookie_size -= 4;
630bd0388aeSWilliam Allen Simpson 			need -= 4;
631bd0388aeSWilliam Allen Simpson 		}
632bd0388aeSWilliam Allen Simpson 		if (TCP_COOKIE_MIN <= cookie_size) {
633bd0388aeSWilliam Allen Simpson 			opts->options |= OPTION_COOKIE_EXTENSION;
634bd0388aeSWilliam Allen Simpson 			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
635bd0388aeSWilliam Allen Simpson 			opts->hash_size = cookie_size;
636bd0388aeSWilliam Allen Simpson 
637bd0388aeSWilliam Allen Simpson 			/* Remember for future incarnations. */
638bd0388aeSWilliam Allen Simpson 			cvp->cookie_desired = cookie_size;
639bd0388aeSWilliam Allen Simpson 
640bd0388aeSWilliam Allen Simpson 			if (cvp->cookie_desired != cvp->cookie_pair_size) {
641bd0388aeSWilliam Allen Simpson 				/* Currently use random bytes as a nonce,
642bd0388aeSWilliam Allen Simpson 				 * assuming these are completely unpredictable
643bd0388aeSWilliam Allen Simpson 				 * by hostile users of the same system.
644bd0388aeSWilliam Allen Simpson 				 */
645bd0388aeSWilliam Allen Simpson 				get_random_bytes(&cvp->cookie_pair[0],
646bd0388aeSWilliam Allen Simpson 						 cookie_size);
647bd0388aeSWilliam Allen Simpson 				cvp->cookie_pair_size = cookie_size;
648bd0388aeSWilliam Allen Simpson 			}
649bd0388aeSWilliam Allen Simpson 
650bd0388aeSWilliam Allen Simpson 			remaining -= need;
651bd0388aeSWilliam Allen Simpson 		}
652bd0388aeSWilliam Allen Simpson 	}
653bd0388aeSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
65433ad798cSAdam Langley }
65533ad798cSAdam Langley 
65667edfef7SAndi Kleen /* Set up TCP options for SYN-ACKs. */
65733ad798cSAdam Langley static unsigned tcp_synack_options(struct sock *sk,
65833ad798cSAdam Langley 				   struct request_sock *req,
65933ad798cSAdam Langley 				   unsigned mss, struct sk_buff *skb,
66033ad798cSAdam Langley 				   struct tcp_out_options *opts,
6614957faadSWilliam Allen Simpson 				   struct tcp_md5sig_key **md5,
6624957faadSWilliam Allen Simpson 				   struct tcp_extend_values *xvp)
6634957faadSWilliam Allen Simpson {
66433ad798cSAdam Langley 	struct inet_request_sock *ireq = inet_rsk(req);
6654957faadSWilliam Allen Simpson 	unsigned remaining = MAX_TCP_OPTION_SPACE;
6664957faadSWilliam Allen Simpson 	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
6674957faadSWilliam Allen Simpson 			 xvp->cookie_plus :
6684957faadSWilliam Allen Simpson 			 0;
6694957faadSWilliam Allen Simpson 	bool doing_ts = ireq->tstamp_ok;
67033ad798cSAdam Langley 
67133ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
67233ad798cSAdam Langley 	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
67333ad798cSAdam Langley 	if (*md5) {
67433ad798cSAdam Langley 		opts->options |= OPTION_MD5;
6754957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_MD5SIG_ALIGNED;
6764957faadSWilliam Allen Simpson 
6774957faadSWilliam Allen Simpson 		/* We can't fit any SACK blocks in a packet with MD5 + TS
6784957faadSWilliam Allen Simpson 		 * options. There was discussion about disabling SACK
6794957faadSWilliam Allen Simpson 		 * rather than TS in order to fit in better with old,
6804957faadSWilliam Allen Simpson 		 * buggy kernels, but that was deemed to be unnecessary.
6814957faadSWilliam Allen Simpson 		 */
6824957faadSWilliam Allen Simpson 		doing_ts &= !ireq->sack_ok;
68333ad798cSAdam Langley 	}
68433ad798cSAdam Langley #else
68533ad798cSAdam Langley 	*md5 = NULL;
68633ad798cSAdam Langley #endif
68733ad798cSAdam Langley 
6884957faadSWilliam Allen Simpson 	/* We always send an MSS option. */
68933ad798cSAdam Langley 	opts->mss = mss;
6904957faadSWilliam Allen Simpson 	remaining -= TCPOLEN_MSS_ALIGNED;
69133ad798cSAdam Langley 
69233ad798cSAdam Langley 	if (likely(ireq->wscale_ok)) {
69333ad798cSAdam Langley 		opts->ws = ireq->rcv_wscale;
69489e95a61SOri Finkelman 		opts->options |= OPTION_WSCALE;
6954957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_WSCALE_ALIGNED;
69633ad798cSAdam Langley 	}
69733ad798cSAdam Langley 	if (likely(doing_ts)) {
69833ad798cSAdam Langley 		opts->options |= OPTION_TS;
69933ad798cSAdam Langley 		opts->tsval = TCP_SKB_CB(skb)->when;
70033ad798cSAdam Langley 		opts->tsecr = req->ts_recent;
7014957faadSWilliam Allen Simpson 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
70233ad798cSAdam Langley 	}
70333ad798cSAdam Langley 	if (likely(ireq->sack_ok)) {
70433ad798cSAdam Langley 		opts->options |= OPTION_SACK_ADVERTISE;
70533ad798cSAdam Langley 		if (unlikely(!doing_ts))
7064957faadSWilliam Allen Simpson 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
70733ad798cSAdam Langley 	}
70833ad798cSAdam Langley 
7094957faadSWilliam Allen Simpson 	/* Similar rationale to tcp_syn_options() applies here, too.
7104957faadSWilliam Allen Simpson 	 * If the <SYN> options fit, the same options should fit now!
7114957faadSWilliam Allen Simpson 	 */
7124957faadSWilliam Allen Simpson 	if (*md5 == NULL &&
7134957faadSWilliam Allen Simpson 	    doing_ts &&
7144957faadSWilliam Allen Simpson 	    cookie_plus > TCPOLEN_COOKIE_BASE) {
7154957faadSWilliam Allen Simpson 		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
7164957faadSWilliam Allen Simpson 
7174957faadSWilliam Allen Simpson 		if (0x2 & need) {
7184957faadSWilliam Allen Simpson 			/* 32-bit multiple */
7194957faadSWilliam Allen Simpson 			need += 2; /* NOPs */
7204957faadSWilliam Allen Simpson 		}
7214957faadSWilliam Allen Simpson 		if (need <= remaining) {
7224957faadSWilliam Allen Simpson 			opts->options |= OPTION_COOKIE_EXTENSION;
7234957faadSWilliam Allen Simpson 			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
7244957faadSWilliam Allen Simpson 			remaining -= need;
7254957faadSWilliam Allen Simpson 		} else {
7264957faadSWilliam Allen Simpson 			/* There's no error return, so flag it. */
7274957faadSWilliam Allen Simpson 			xvp->cookie_out_never = 1; /* true */
7284957faadSWilliam Allen Simpson 			opts->hash_size = 0;
7294957faadSWilliam Allen Simpson 		}
7304957faadSWilliam Allen Simpson 	}
7314957faadSWilliam Allen Simpson 	return MAX_TCP_OPTION_SPACE - remaining;
73233ad798cSAdam Langley }
73333ad798cSAdam Langley 
73467edfef7SAndi Kleen /* Compute TCP options for ESTABLISHED sockets. This is not the
73567edfef7SAndi Kleen  * final wire format yet.
73667edfef7SAndi Kleen  */
73733ad798cSAdam Langley static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
73833ad798cSAdam Langley 					struct tcp_out_options *opts,
73933ad798cSAdam Langley 					struct tcp_md5sig_key **md5) {
74033ad798cSAdam Langley 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
74133ad798cSAdam Langley 	struct tcp_sock *tp = tcp_sk(sk);
74233ad798cSAdam Langley 	unsigned size = 0;
743cabeccbdSIlpo Järvinen 	unsigned int eff_sacks;
74433ad798cSAdam Langley 
74533ad798cSAdam Langley #ifdef CONFIG_TCP_MD5SIG
74633ad798cSAdam Langley 	*md5 = tp->af_specific->md5_lookup(sk, sk);
74733ad798cSAdam Langley 	if (unlikely(*md5)) {
74833ad798cSAdam Langley 		opts->options |= OPTION_MD5;
74933ad798cSAdam Langley 		size += TCPOLEN_MD5SIG_ALIGNED;
75033ad798cSAdam Langley 	}
75133ad798cSAdam Langley #else
75233ad798cSAdam Langley 	*md5 = NULL;
75333ad798cSAdam Langley #endif
75433ad798cSAdam Langley 
75533ad798cSAdam Langley 	if (likely(tp->rx_opt.tstamp_ok)) {
75633ad798cSAdam Langley 		opts->options |= OPTION_TS;
75733ad798cSAdam Langley 		opts->tsval = tcb ? tcb->when : 0;
75833ad798cSAdam Langley 		opts->tsecr = tp->rx_opt.ts_recent;
75933ad798cSAdam Langley 		size += TCPOLEN_TSTAMP_ALIGNED;
76033ad798cSAdam Langley 	}
76133ad798cSAdam Langley 
762cabeccbdSIlpo Järvinen 	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
763cabeccbdSIlpo Järvinen 	if (unlikely(eff_sacks)) {
76433ad798cSAdam Langley 		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
76533ad798cSAdam Langley 		opts->num_sack_blocks =
766cabeccbdSIlpo Järvinen 			min_t(unsigned, eff_sacks,
76733ad798cSAdam Langley 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
76833ad798cSAdam Langley 			      TCPOLEN_SACK_PERBLOCK);
76933ad798cSAdam Langley 		size += TCPOLEN_SACK_BASE_ALIGNED +
77033ad798cSAdam Langley 			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
77133ad798cSAdam Langley 	}
77233ad798cSAdam Langley 
77333ad798cSAdam Langley 	return size;
77440efc6faSStephen Hemminger }
7751da177e4SLinus Torvalds 
7761da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by
7771da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
7781da177e4SLinus Torvalds  * transmission and possible later retransmissions.
7791da177e4SLinus Torvalds  * All SKB's seen here are completely headerless.  It is our
7801da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
7811da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
7821da177e4SLinus Torvalds  * device.
7831da177e4SLinus Torvalds  *
7841da177e4SLinus Torvalds  * We are working here with either a clone of the original
7851da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
7861da177e4SLinus Torvalds  */
787056834d9SIlpo Järvinen static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
788056834d9SIlpo Järvinen 			    gfp_t gfp_mask)
7891da177e4SLinus Torvalds {
7906687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
791dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
792dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
793dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
79433ad798cSAdam Langley 	struct tcp_out_options opts;
79533ad798cSAdam Langley 	unsigned tcp_options_size, tcp_header_size;
796cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
7971da177e4SLinus Torvalds 	struct tcphdr *th;
7981da177e4SLinus Torvalds 	int err;
7991da177e4SLinus Torvalds 
800dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
801dfb4b9dcSDavid S. Miller 
802dfb4b9dcSDavid S. Miller 	/* If congestion control is doing timestamping, we must
803dfb4b9dcSDavid S. Miller 	 * take such a timestamp before we potentially clone/copy.
804dfb4b9dcSDavid S. Miller 	 */
805164891aaSStephen Hemminger 	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
806dfb4b9dcSDavid S. Miller 		__net_timestamp(skb);
807dfb4b9dcSDavid S. Miller 
808dfb4b9dcSDavid S. Miller 	if (likely(clone_it)) {
809dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
810dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
811dfb4b9dcSDavid S. Miller 		else
812dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
813dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
814dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
815dfb4b9dcSDavid S. Miller 	}
816dfb4b9dcSDavid S. Miller 
817dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
818dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
819dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
82033ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
8211da177e4SLinus Torvalds 
82233ad798cSAdam Langley 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
82333ad798cSAdam Langley 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
82433ad798cSAdam Langley 	else
82533ad798cSAdam Langley 		tcp_options_size = tcp_established_options(sk, skb, &opts,
82633ad798cSAdam Langley 							   &md5);
82733ad798cSAdam Langley 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
8281da177e4SLinus Torvalds 
829317a76f9SStephen Hemminger 	if (tcp_packets_in_flight(tp) == 0)
8306687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
8311da177e4SLinus Torvalds 
832aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
833aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
834e89862f4SDavid S. Miller 	skb_set_owner_w(skb, sk);
8351da177e4SLinus Torvalds 
8361da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
837aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
838c720c7e8SEric Dumazet 	th->source		= inet->inet_sport;
839c720c7e8SEric Dumazet 	th->dest		= inet->inet_dport;
8401da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
8411da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
842df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
843dfb4b9dcSDavid S. Miller 					tcb->flags);
844dfb4b9dcSDavid S. Miller 
845dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
8461da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
8471da177e4SLinus Torvalds 		 * is never scaled.
8481da177e4SLinus Torvalds 		 */
849600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
8501da177e4SLinus Torvalds 	} else {
8511da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
8521da177e4SLinus Torvalds 	}
8531da177e4SLinus Torvalds 	th->check		= 0;
8541da177e4SLinus Torvalds 	th->urg_ptr		= 0;
8551da177e4SLinus Torvalds 
85633f5f57eSIlpo Järvinen 	/* The urg_mode check is necessary during a below snd_una win probe */
8577691367dSHerbert Xu 	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
8587691367dSHerbert Xu 		if (before(tp->snd_up, tcb->seq + 0x10000)) {
8591da177e4SLinus Torvalds 			th->urg_ptr = htons(tp->snd_up - tcb->seq);
8601da177e4SLinus Torvalds 			th->urg = 1;
8617691367dSHerbert Xu 		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
8627691367dSHerbert Xu 			th->urg_ptr = 0xFFFF;
8637691367dSHerbert Xu 			th->urg = 1;
8647691367dSHerbert Xu 		}
8651da177e4SLinus Torvalds 	}
8661da177e4SLinus Torvalds 
867bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
86833ad798cSAdam Langley 	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
8699e412ba7SIlpo Järvinen 		TCP_ECN_send(sk, skb, tcp_header_size);
870dfb4b9dcSDavid S. Miller 
871cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
872cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
873cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
87433ad798cSAdam Langley 		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
875bd0388aeSWilliam Allen Simpson 		tp->af_specific->calc_md5_hash(opts.hash_location,
87649a72dfbSAdam Langley 					       md5, sk, NULL, skb);
877cfb6eeb4SYOSHIFUJI Hideaki 	}
878cfb6eeb4SYOSHIFUJI Hideaki #endif
879cfb6eeb4SYOSHIFUJI Hideaki 
8808292a17aSArnaldo Carvalho de Melo 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
8811da177e4SLinus Torvalds 
882dfb4b9dcSDavid S. Miller 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
883fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
8841da177e4SLinus Torvalds 
8851da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
8861da177e4SLinus Torvalds 		tcp_event_data_sent(tp, skb, sk);
8871da177e4SLinus Torvalds 
888bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
88981cc8a75SPavel Emelyanov 		TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
8901da177e4SLinus Torvalds 
891e89862f4SDavid S. Miller 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
89283de47cdSHua Zhong 	if (likely(err <= 0))
8931da177e4SLinus Torvalds 		return err;
8941da177e4SLinus Torvalds 
8953cfe3baaSIlpo Järvinen 	tcp_enter_cwr(sk, 1);
8961da177e4SLinus Torvalds 
897b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
8981da177e4SLinus Torvalds }
8991da177e4SLinus Torvalds 
90067edfef7SAndi Kleen /* This routine just queues the buffer for sending.
9011da177e4SLinus Torvalds  *
9021da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
9031da177e4SLinus Torvalds  * otherwise socket can stall.
9041da177e4SLinus Torvalds  */
9051da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
9061da177e4SLinus Torvalds {
9071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9081da177e4SLinus Torvalds 
9091da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
9101da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
9111da177e4SLinus Torvalds 	skb_header_release(skb);
912fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
9133ab224beSHideo Aoki 	sk->sk_wmem_queued += skb->truesize;
9143ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
9151da177e4SLinus Torvalds }
9161da177e4SLinus Torvalds 
91767edfef7SAndi Kleen /* Initialize TSO segments for a packet. */
918056834d9SIlpo Järvinen static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
919056834d9SIlpo Järvinen 				 unsigned int mss_now)
920f6302d1dSDavid S. Miller {
9218e5b9ddaSHerbert Xu 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
9228e5b9ddaSHerbert Xu 	    skb->ip_summed == CHECKSUM_NONE) {
923f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
924f6302d1dSDavid S. Miller 		 * non-TSO case.
925f6302d1dSDavid S. Miller 		 */
9267967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
9277967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
9287967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
929f6302d1dSDavid S. Miller 	} else {
930356f89e1SIlpo Järvinen 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
9317967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = mss_now;
932bcd76111SHerbert Xu 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
9331da177e4SLinus Torvalds 	}
9341da177e4SLinus Torvalds }
9351da177e4SLinus Torvalds 
93691fed7a1SIlpo Järvinen /* When a modification to fackets out becomes necessary, we need to check
93768f8353bSIlpo Järvinen  * skb is counted to fackets_out or not.
93891fed7a1SIlpo Järvinen  */
939a47e5a98SIlpo Järvinen static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
94091fed7a1SIlpo Järvinen 				   int decr)
94191fed7a1SIlpo Järvinen {
942a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
943a47e5a98SIlpo Järvinen 
944dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
94591fed7a1SIlpo Järvinen 		return;
94691fed7a1SIlpo Järvinen 
9476859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
94891fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
94991fed7a1SIlpo Järvinen }
95091fed7a1SIlpo Järvinen 
951797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various
952797108d1SIlpo Järvinen  * tweaks to fix counters
953797108d1SIlpo Järvinen  */
954797108d1SIlpo Järvinen static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
955797108d1SIlpo Järvinen {
956797108d1SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
957797108d1SIlpo Järvinen 
958797108d1SIlpo Järvinen 	tp->packets_out -= decr;
959797108d1SIlpo Järvinen 
960797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
961797108d1SIlpo Järvinen 		tp->sacked_out -= decr;
962797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
963797108d1SIlpo Järvinen 		tp->retrans_out -= decr;
964797108d1SIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
965797108d1SIlpo Järvinen 		tp->lost_out -= decr;
966797108d1SIlpo Järvinen 
967797108d1SIlpo Järvinen 	/* Reno case is special. Sigh... */
968797108d1SIlpo Järvinen 	if (tcp_is_reno(tp) && decr > 0)
969797108d1SIlpo Järvinen 		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
970797108d1SIlpo Järvinen 
971797108d1SIlpo Järvinen 	tcp_adjust_fackets_out(sk, skb, decr);
972797108d1SIlpo Järvinen 
973797108d1SIlpo Järvinen 	if (tp->lost_skb_hint &&
974797108d1SIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
97552cf3cc8SIlpo Järvinen 	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
976797108d1SIlpo Järvinen 		tp->lost_cnt_hint -= decr;
977797108d1SIlpo Järvinen 
978797108d1SIlpo Järvinen 	tcp_verify_left_out(tp);
979797108d1SIlpo Järvinen }
980797108d1SIlpo Järvinen 
9811da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
9821da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
9831da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
9841da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
9851da177e4SLinus Torvalds  */
986056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
987056834d9SIlpo Järvinen 		 unsigned int mss_now)
9881da177e4SLinus Torvalds {
9891da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9901da177e4SLinus Torvalds 	struct sk_buff *buff;
9916475be16SDavid S. Miller 	int nsize, old_factor;
992b60b49eaSHerbert Xu 	int nlen;
9939ce01461SIlpo Järvinen 	u8 flags;
9941da177e4SLinus Torvalds 
995b2cc99f0SHerbert Xu 	BUG_ON(len > skb->len);
9966a438bbeSStephen Hemminger 
9971da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
9981da177e4SLinus Torvalds 	if (nsize < 0)
9991da177e4SLinus Torvalds 		nsize = 0;
10001da177e4SLinus Torvalds 
10011da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
10021da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
10031da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
10041da177e4SLinus Torvalds 		return -ENOMEM;
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
10071da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
10081da177e4SLinus Torvalds 	if (buff == NULL)
10091da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
1010ef5cb973SHerbert Xu 
10113ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
10123ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1013b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
1014b60b49eaSHerbert Xu 	buff->truesize += nlen;
1015b60b49eaSHerbert Xu 	skb->truesize -= nlen;
10161da177e4SLinus Torvalds 
10171da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
10181da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
10191da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
10201da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
10211da177e4SLinus Torvalds 
10221da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
10231da177e4SLinus Torvalds 	flags = TCP_SKB_CB(skb)->flags;
10241da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
10251da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = flags;
1026e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
10271da177e4SLinus Torvalds 
102884fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
10291da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
1030056834d9SIlpo Järvinen 		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1031056834d9SIlpo Järvinen 						       skb_put(buff, nsize),
10321da177e4SLinus Torvalds 						       nsize, 0);
10331da177e4SLinus Torvalds 
10341da177e4SLinus Torvalds 		skb_trim(skb, len);
10351da177e4SLinus Torvalds 
10361da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
10371da177e4SLinus Torvalds 	} else {
103884fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
10391da177e4SLinus Torvalds 		skb_split(skb, buff, len);
10401da177e4SLinus Torvalds 	}
10411da177e4SLinus Torvalds 
10421da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
10431da177e4SLinus Torvalds 
10441da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses when of
10451da177e4SLinus Torvalds 	 * skbs, which it never sent before. --ANK
10461da177e4SLinus Torvalds 	 */
10471da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1048a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
10491da177e4SLinus Torvalds 
10506475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
10516475be16SDavid S. Miller 
10521da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
1053846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1054846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
10551da177e4SLinus Torvalds 
10566475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
10576475be16SDavid S. Miller 	 * adjust the various packet counters.
10586475be16SDavid S. Miller 	 */
1059cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
10606475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
10616475be16SDavid S. Miller 			tcp_skb_pcount(buff);
10621da177e4SLinus Torvalds 
1063797108d1SIlpo Järvinen 		if (diff)
1064797108d1SIlpo Järvinen 			tcp_adjust_pcount(sk, skb, diff);
10651da177e4SLinus Torvalds 	}
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
1068f44b5271SDavid S. Miller 	skb_header_release(buff);
1069fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
10701da177e4SLinus Torvalds 
10711da177e4SLinus Torvalds 	return 0;
10721da177e4SLinus Torvalds }
10731da177e4SLinus Torvalds 
10741da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
10751da177e4SLinus Torvalds  * eventually). The difference is that pulled data not copied, but
10761da177e4SLinus Torvalds  * immediately discarded.
10771da177e4SLinus Torvalds  */
1078f2911969SHerbert Xu ~{PmVHI~} static void __pskb_trim_head(struct sk_buff *skb, int len)
10791da177e4SLinus Torvalds {
10801da177e4SLinus Torvalds 	int i, k, eat;
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 	eat = len;
10831da177e4SLinus Torvalds 	k = 0;
10841da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10851da177e4SLinus Torvalds 		if (skb_shinfo(skb)->frags[i].size <= eat) {
10861da177e4SLinus Torvalds 			put_page(skb_shinfo(skb)->frags[i].page);
10871da177e4SLinus Torvalds 			eat -= skb_shinfo(skb)->frags[i].size;
10881da177e4SLinus Torvalds 		} else {
10891da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
10901da177e4SLinus Torvalds 			if (eat) {
10911da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
10921da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].size -= eat;
10931da177e4SLinus Torvalds 				eat = 0;
10941da177e4SLinus Torvalds 			}
10951da177e4SLinus Torvalds 			k++;
10961da177e4SLinus Torvalds 		}
10971da177e4SLinus Torvalds 	}
10981da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
10991da177e4SLinus Torvalds 
110027a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
11011da177e4SLinus Torvalds 	skb->data_len -= len;
11021da177e4SLinus Torvalds 	skb->len = skb->data_len;
11031da177e4SLinus Torvalds }
11041da177e4SLinus Torvalds 
110567edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */
11061da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
11071da177e4SLinus Torvalds {
1108056834d9SIlpo Järvinen 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
11091da177e4SLinus Torvalds 		return -ENOMEM;
11101da177e4SLinus Torvalds 
1111f2911969SHerbert Xu ~{PmVHI~} 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
1112f2911969SHerbert Xu ~{PmVHI~} 	if (unlikely(len < skb_headlen(skb)))
11131da177e4SLinus Torvalds 		__skb_pull(skb, len);
1114f2911969SHerbert Xu ~{PmVHI~} 	else
1115f2911969SHerbert Xu ~{PmVHI~} 		__pskb_trim_head(skb, len - skb_headlen(skb));
11161da177e4SLinus Torvalds 
11171da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
111884fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
11191da177e4SLinus Torvalds 
11201da177e4SLinus Torvalds 	skb->truesize	     -= len;
11211da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
11223ab224beSHideo Aoki 	sk_mem_uncharge(sk, len);
11231da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
11241da177e4SLinus Torvalds 
11251da177e4SLinus Torvalds 	/* Any change of skb->len requires recalculation of tso
11261da177e4SLinus Torvalds 	 * factor and mss.
11271da177e4SLinus Torvalds 	 */
11281da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
11290c54b85fSIlpo Järvinen 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
11301da177e4SLinus Torvalds 
11311da177e4SLinus Torvalds 	return 0;
11321da177e4SLinus Torvalds }
11331da177e4SLinus Torvalds 
113467edfef7SAndi Kleen /* Calculate MSS. Not accounting for SACKs here.  */
11355d424d5aSJohn Heffner int tcp_mtu_to_mss(struct sock *sk, int pmtu)
11365d424d5aSJohn Heffner {
11375d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
11385d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
11395d424d5aSJohn Heffner 	int mss_now;
11405d424d5aSJohn Heffner 
11415d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
11425d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
11435d424d5aSJohn Heffner 	 */
11445d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
11455d424d5aSJohn Heffner 
11465d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
11475d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
11485d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
11495d424d5aSJohn Heffner 
11505d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
11515d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
11525d424d5aSJohn Heffner 
11535d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
11545d424d5aSJohn Heffner 	if (mss_now < 48)
11555d424d5aSJohn Heffner 		mss_now = 48;
11565d424d5aSJohn Heffner 
11575d424d5aSJohn Heffner 	/* Now subtract TCP options size, not including SACKs */
11585d424d5aSJohn Heffner 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
11595d424d5aSJohn Heffner 
11605d424d5aSJohn Heffner 	return mss_now;
11615d424d5aSJohn Heffner }
11625d424d5aSJohn Heffner 
11635d424d5aSJohn Heffner /* Inverse of above */
11645d424d5aSJohn Heffner int tcp_mss_to_mtu(struct sock *sk, int mss)
11655d424d5aSJohn Heffner {
11665d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
11675d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
11685d424d5aSJohn Heffner 	int mtu;
11695d424d5aSJohn Heffner 
11705d424d5aSJohn Heffner 	mtu = mss +
11715d424d5aSJohn Heffner 	      tp->tcp_header_len +
11725d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
11735d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
11745d424d5aSJohn Heffner 
11755d424d5aSJohn Heffner 	return mtu;
11765d424d5aSJohn Heffner }
11775d424d5aSJohn Heffner 
117867edfef7SAndi Kleen /* MTU probing init per socket */
11795d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
11805d424d5aSJohn Heffner {
11815d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
11825d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
11835d424d5aSJohn Heffner 
11845d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
11855d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
11865d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
11875d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
11885d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
11895d424d5aSJohn Heffner }
11905d424d5aSJohn Heffner 
11911da177e4SLinus Torvalds /* This function synchronize snd mss to current pmtu/exthdr set.
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
11941da177e4SLinus Torvalds    for TCP options, but includes only bare TCP header.
11951da177e4SLinus Torvalds 
11961da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1197caa20d9aSStephen Hemminger    It is minimum of user_mss and mss received with SYN.
11981da177e4SLinus Torvalds    It also does not include TCP options.
11991da177e4SLinus Torvalds 
1200d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
12011da177e4SLinus Torvalds 
12021da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
12031da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
12041da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
12051da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
12061da177e4SLinus Torvalds 
12071da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
12081da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
12091da177e4SLinus Torvalds 
1210d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1211d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
12121da177e4SLinus Torvalds  */
12131da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
12141da177e4SLinus Torvalds {
12151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1216d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
12175d424d5aSJohn Heffner 	int mss_now;
12181da177e4SLinus Torvalds 
12195d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
12205d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
12211da177e4SLinus Torvalds 
12225d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
1223409d22b4SIlpo Järvinen 	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
12241da177e4SLinus Torvalds 
12251da177e4SLinus Torvalds 	/* And store cached results */
1226d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
12275d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
12285d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1229c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
12301da177e4SLinus Torvalds 
12311da177e4SLinus Torvalds 	return mss_now;
12321da177e4SLinus Torvalds }
12331da177e4SLinus Torvalds 
12341da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
12351da177e4SLinus Torvalds  * and even PMTU discovery events into account.
12361da177e4SLinus Torvalds  */
12370c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk)
12381da177e4SLinus Torvalds {
12391da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12401da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
1241c1b4a7e6SDavid S. Miller 	u32 mss_now;
124233ad798cSAdam Langley 	unsigned header_len;
124333ad798cSAdam Langley 	struct tcp_out_options opts;
124433ad798cSAdam Langley 	struct tcp_md5sig_key *md5;
12451da177e4SLinus Torvalds 
1246c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
1247c1b4a7e6SDavid S. Miller 
12481da177e4SLinus Torvalds 	if (dst) {
12491da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
1250d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
12511da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
12521da177e4SLinus Torvalds 	}
12531da177e4SLinus Torvalds 
125433ad798cSAdam Langley 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
125533ad798cSAdam Langley 		     sizeof(struct tcphdr);
125633ad798cSAdam Langley 	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
125733ad798cSAdam Langley 	 * some common options. If this is an odd packet (because we have SACK
125833ad798cSAdam Langley 	 * blocks etc) then our calculated header_len will be different, and
125933ad798cSAdam Langley 	 * we have to adjust mss_now correspondingly */
126033ad798cSAdam Langley 	if (header_len != tp->tcp_header_len) {
126133ad798cSAdam Langley 		int delta = (int) header_len - tp->tcp_header_len;
126233ad798cSAdam Langley 		mss_now -= delta;
126333ad798cSAdam Langley 	}
1264cfb6eeb4SYOSHIFUJI Hideaki 
12651da177e4SLinus Torvalds 	return mss_now;
12661da177e4SLinus Torvalds }
12671da177e4SLinus Torvalds 
1268a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
12699e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1270a762a980SDavid S. Miller {
12719e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1272a762a980SDavid S. Miller 
1273d436d686SIlpo Järvinen 	if (tp->packets_out >= tp->snd_cwnd) {
1274a762a980SDavid S. Miller 		/* Network is feed fully. */
1275a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1276a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1277a762a980SDavid S. Miller 	} else {
1278a762a980SDavid S. Miller 		/* Network starves. */
1279a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1280a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1281a762a980SDavid S. Miller 
128215d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
128315d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1284a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1285a762a980SDavid S. Miller 	}
1286a762a980SDavid S. Miller }
1287a762a980SDavid S. Miller 
12880e3a4803SIlpo Järvinen /* Returns the portion of skb which can be sent right away without
12890e3a4803SIlpo Järvinen  * introducing MSS oddities to segment boundaries. In rare cases where
12900e3a4803SIlpo Järvinen  * mss_now != mss_cache, we will request caller to create a small skb
12910e3a4803SIlpo Järvinen  * per input skb which could be mostly avoided here (if desired).
12925ea3a748SIlpo Järvinen  *
12935ea3a748SIlpo Järvinen  * We explicitly want to create a request for splitting write queue tail
12945ea3a748SIlpo Järvinen  * to a small skb for Nagle purposes while avoiding unnecessary modulos,
12955ea3a748SIlpo Järvinen  * thus all the complexity (cwnd_len is always MSS multiple which we
12965ea3a748SIlpo Järvinen  * return whenever allowed by the other factors). Basically we need the
12975ea3a748SIlpo Järvinen  * modulo only when the receiver window alone is the limiting factor or
12985ea3a748SIlpo Järvinen  * when we would be allowed to send the split-due-to-Nagle skb fully.
12990e3a4803SIlpo Järvinen  */
13000e3a4803SIlpo Järvinen static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1301056834d9SIlpo Järvinen 					unsigned int mss_now, unsigned int cwnd)
1302c1b4a7e6SDavid S. Miller {
13030e3a4803SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
13040e3a4803SIlpo Järvinen 	u32 needed, window, cwnd_len;
1305c1b4a7e6SDavid S. Miller 
130690840defSIlpo Järvinen 	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1307c1b4a7e6SDavid S. Miller 	cwnd_len = mss_now * cwnd;
13080e3a4803SIlpo Järvinen 
13090e3a4803SIlpo Järvinen 	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
13100e3a4803SIlpo Järvinen 		return cwnd_len;
13110e3a4803SIlpo Järvinen 
13125ea3a748SIlpo Järvinen 	needed = min(skb->len, window);
13135ea3a748SIlpo Järvinen 
131417515408SIlpo Järvinen 	if (cwnd_len <= needed)
13150e3a4803SIlpo Järvinen 		return cwnd_len;
13160e3a4803SIlpo Järvinen 
13170e3a4803SIlpo Järvinen 	return needed - needed % mss_now;
1318c1b4a7e6SDavid S. Miller }
1319c1b4a7e6SDavid S. Miller 
1320c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1321c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1322c1b4a7e6SDavid S. Miller  */
1323056834d9SIlpo Järvinen static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1324056834d9SIlpo Järvinen 					 struct sk_buff *skb)
1325c1b4a7e6SDavid S. Miller {
1326c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1327c1b4a7e6SDavid S. Miller 
1328c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
1329104439a8SJohn Heffner 	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1330104439a8SJohn Heffner 	    tcp_skb_pcount(skb) == 1)
1331c1b4a7e6SDavid S. Miller 		return 1;
1332c1b4a7e6SDavid S. Miller 
1333c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1334c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1335c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1336c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1337c1b4a7e6SDavid S. Miller 
1338c1b4a7e6SDavid S. Miller 	return 0;
1339c1b4a7e6SDavid S. Miller }
1340c1b4a7e6SDavid S. Miller 
134167edfef7SAndi Kleen /* Intialize TSO state of a skb.
134267edfef7SAndi Kleen  * This must be invoked the first time we consider transmitting
1343c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1344c1b4a7e6SDavid S. Miller  */
1345056834d9SIlpo Järvinen static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
1346056834d9SIlpo Järvinen 			     unsigned int mss_now)
1347c1b4a7e6SDavid S. Miller {
1348c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1349c1b4a7e6SDavid S. Miller 
1350f8269a49SIlpo Järvinen 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1351846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1352c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1353c1b4a7e6SDavid S. Miller 	}
1354c1b4a7e6SDavid S. Miller 	return tso_segs;
1355c1b4a7e6SDavid S. Miller }
1356c1b4a7e6SDavid S. Miller 
135767edfef7SAndi Kleen /* Minshall's variant of the Nagle send check. */
1358c1b4a7e6SDavid S. Miller static inline int tcp_minshall_check(const struct tcp_sock *tp)
1359c1b4a7e6SDavid S. Miller {
1360c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml, tp->snd_una) &&
1361c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
1362c1b4a7e6SDavid S. Miller }
1363c1b4a7e6SDavid S. Miller 
1364c1b4a7e6SDavid S. Miller /* Return 0, if packet can be sent now without violation Nagle's rules:
1365c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1366c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
1367c1b4a7e6SDavid S. Miller  * 3. Or TCP_NODELAY was set.
1368c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1369c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1370c1b4a7e6SDavid S. Miller  */
1371c1b4a7e6SDavid S. Miller static inline int tcp_nagle_check(const struct tcp_sock *tp,
1372c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
1373c1b4a7e6SDavid S. Miller 				  unsigned mss_now, int nonagle)
1374c1b4a7e6SDavid S. Miller {
1375c1b4a7e6SDavid S. Miller 	return (skb->len < mss_now &&
1376c1b4a7e6SDavid S. Miller 		((nonagle & TCP_NAGLE_CORK) ||
1377056834d9SIlpo Järvinen 		 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
1378c1b4a7e6SDavid S. Miller }
1379c1b4a7e6SDavid S. Miller 
1380c1b4a7e6SDavid S. Miller /* Return non-zero if the Nagle test allows this packet to be
1381c1b4a7e6SDavid S. Miller  * sent now.
1382c1b4a7e6SDavid S. Miller  */
1383c1b4a7e6SDavid S. Miller static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1384c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1385c1b4a7e6SDavid S. Miller {
1386c1b4a7e6SDavid S. Miller 	/* Nagle rule does not apply to frames, which sit in the middle of the
1387c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chances to get new data).
1388c1b4a7e6SDavid S. Miller 	 *
1389c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1390c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1391c1b4a7e6SDavid S. Miller 	 */
1392c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1393c1b4a7e6SDavid S. Miller 		return 1;
1394c1b4a7e6SDavid S. Miller 
1395d551e454SIlpo Järvinen 	/* Don't use the nagle rule for urgent data (or for the final FIN).
1396d551e454SIlpo Järvinen 	 * Nagle can be ignored during F-RTO too (see RFC4138).
1397d551e454SIlpo Järvinen 	 */
139833f5f57eSIlpo Järvinen 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1399c1b4a7e6SDavid S. Miller 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1400c1b4a7e6SDavid S. Miller 		return 1;
1401c1b4a7e6SDavid S. Miller 
1402c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1403c1b4a7e6SDavid S. Miller 		return 1;
1404c1b4a7e6SDavid S. Miller 
1405c1b4a7e6SDavid S. Miller 	return 0;
1406c1b4a7e6SDavid S. Miller }
1407c1b4a7e6SDavid S. Miller 
1408c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1409056834d9SIlpo Järvinen static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
1410056834d9SIlpo Järvinen 				   unsigned int cur_mss)
1411c1b4a7e6SDavid S. Miller {
1412c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1413c1b4a7e6SDavid S. Miller 
1414c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1415c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1416c1b4a7e6SDavid S. Miller 
141790840defSIlpo Järvinen 	return !after(end_seq, tcp_wnd_end(tp));
1418c1b4a7e6SDavid S. Miller }
1419c1b4a7e6SDavid S. Miller 
1420fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1421c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1422c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1423c1b4a7e6SDavid S. Miller  */
1424c1b4a7e6SDavid S. Miller static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1425c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1426c1b4a7e6SDavid S. Miller {
1427c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1428c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1429c1b4a7e6SDavid S. Miller 
1430846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1431c1b4a7e6SDavid S. Miller 
1432c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1433c1b4a7e6SDavid S. Miller 		return 0;
1434c1b4a7e6SDavid S. Miller 
1435c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1436056834d9SIlpo Järvinen 	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1437c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1438c1b4a7e6SDavid S. Miller 
1439c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1440c1b4a7e6SDavid S. Miller }
1441c1b4a7e6SDavid S. Miller 
144267edfef7SAndi Kleen /* Test if sending is allowed right now. */
14439e412ba7SIlpo Järvinen int tcp_may_send_now(struct sock *sk)
1444c1b4a7e6SDavid S. Miller {
14459e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1446fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1447c1b4a7e6SDavid S. Miller 
1448c1b4a7e6SDavid S. Miller 	return (skb &&
14490c54b85fSIlpo Järvinen 		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1450c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
14514e67d876SIlpo Järvinen 			      tp->nonagle : TCP_NAGLE_PUSH)));
1452c1b4a7e6SDavid S. Miller }
1453c1b4a7e6SDavid S. Miller 
1454c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1455c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1456c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1457c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1458c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1459c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1460c1b4a7e6SDavid S. Miller  */
1461056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1462056834d9SIlpo Järvinen 			unsigned int mss_now)
1463c1b4a7e6SDavid S. Miller {
1464c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1465c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
14669ce01461SIlpo Järvinen 	u8 flags;
1467c1b4a7e6SDavid S. Miller 
1468c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1469c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1470c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1471c1b4a7e6SDavid S. Miller 
1472df97c708SPavel Emelyanov 	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
1473c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1474c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1475c1b4a7e6SDavid S. Miller 
14763ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
14773ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
1478b60b49eaSHerbert Xu 	buff->truesize += nlen;
1479c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1480c1b4a7e6SDavid S. Miller 
1481c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1482c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1483c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1484c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1485c1b4a7e6SDavid S. Miller 
1486c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
1487c1b4a7e6SDavid S. Miller 	flags = TCP_SKB_CB(skb)->flags;
1488c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
1489c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->flags = flags;
1490c1b4a7e6SDavid S. Miller 
1491c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1492c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1493c1b4a7e6SDavid S. Miller 
149484fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1495c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1496c1b4a7e6SDavid S. Miller 
1497c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1498846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1499846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1500c1b4a7e6SDavid S. Miller 
1501c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1502c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1503fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1504c1b4a7e6SDavid S. Miller 
1505c1b4a7e6SDavid S. Miller 	return 0;
1506c1b4a7e6SDavid S. Miller }
1507c1b4a7e6SDavid S. Miller 
1508c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1509c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1510c1b4a7e6SDavid S. Miller  *
1511c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1512c1b4a7e6SDavid S. Miller  */
15139e412ba7SIlpo Järvinen static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1514c1b4a7e6SDavid S. Miller {
15159e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
15166687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1517c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1518c1b4a7e6SDavid S. Miller 
1519c1b4a7e6SDavid S. Miller 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1520ae8064acSJohn Heffner 		goto send_now;
1521c1b4a7e6SDavid S. Miller 
15226687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1523ae8064acSJohn Heffner 		goto send_now;
1524ae8064acSJohn Heffner 
1525ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
1526bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1527a2acde07SIlpo Järvinen 	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1528ae8064acSJohn Heffner 		goto send_now;
1529908a75c1SDavid S. Miller 
1530c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1531c1b4a7e6SDavid S. Miller 
1532056834d9SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1533c1b4a7e6SDavid S. Miller 
153490840defSIlpo Järvinen 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1535c1b4a7e6SDavid S. Miller 
1536c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1537c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1538c1b4a7e6SDavid S. Miller 
1539c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1540c1b4a7e6SDavid S. Miller 
1541ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
154282cc1a7aSPeter P Waskiewicz Jr 	if (limit >= sk->sk_gso_max_size)
1543ae8064acSJohn Heffner 		goto send_now;
1544ba244fe9SDavid S. Miller 
154562ad2761SIlpo Järvinen 	/* Middle in queue won't get any more data, full sendable already? */
154662ad2761SIlpo Järvinen 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
154762ad2761SIlpo Järvinen 		goto send_now;
154862ad2761SIlpo Järvinen 
1549c1b4a7e6SDavid S. Miller 	if (sysctl_tcp_tso_win_divisor) {
1550c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1551c1b4a7e6SDavid S. Miller 
1552c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1553c1b4a7e6SDavid S. Miller 		 * just use it.
1554c1b4a7e6SDavid S. Miller 		 */
1555c1b4a7e6SDavid S. Miller 		chunk /= sysctl_tcp_tso_win_divisor;
1556c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1557ae8064acSJohn Heffner 			goto send_now;
1558c1b4a7e6SDavid S. Miller 	} else {
1559c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1560c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1561c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1562c1b4a7e6SDavid S. Miller 		 * then send now.
1563c1b4a7e6SDavid S. Miller 		 */
1564c1b4a7e6SDavid S. Miller 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1565ae8064acSJohn Heffner 			goto send_now;
1566c1b4a7e6SDavid S. Miller 	}
1567c1b4a7e6SDavid S. Miller 
1568c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1569ae8064acSJohn Heffner 	tp->tso_deferred = 1 | (jiffies << 1);
1570ae8064acSJohn Heffner 
1571c1b4a7e6SDavid S. Miller 	return 1;
1572ae8064acSJohn Heffner 
1573ae8064acSJohn Heffner send_now:
1574ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1575ae8064acSJohn Heffner 	return 0;
1576c1b4a7e6SDavid S. Miller }
1577c1b4a7e6SDavid S. Miller 
15785d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
157967edfef7SAndi Kleen  * MTU probe is regularly attempting to increase the path MTU by
158067edfef7SAndi Kleen  * deliberately sending larger packets.  This discovers routing
158167edfef7SAndi Kleen  * changes resulting in larger path MTUs.
158267edfef7SAndi Kleen  *
15835d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
15845d424d5aSJohn Heffner  *         1 if a probe was sent,
1585056834d9SIlpo Järvinen  *         -1 otherwise
1586056834d9SIlpo Järvinen  */
15875d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
15885d424d5aSJohn Heffner {
15895d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
15905d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
15915d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
15925d424d5aSJohn Heffner 	int len;
15935d424d5aSJohn Heffner 	int probe_size;
159491cc17c0SIlpo Järvinen 	int size_needed;
15955d424d5aSJohn Heffner 	int copy;
15965d424d5aSJohn Heffner 	int mss_now;
15975d424d5aSJohn Heffner 
15985d424d5aSJohn Heffner 	/* Not currently probing/verifying,
15995d424d5aSJohn Heffner 	 * not in recovery,
16005d424d5aSJohn Heffner 	 * have enough cwnd, and
16015d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
16025d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
16035d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
16045d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
16055d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
1606cabeccbdSIlpo Järvinen 	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
16075d424d5aSJohn Heffner 		return -1;
16085d424d5aSJohn Heffner 
16095d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
16100c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
16115d424d5aSJohn Heffner 	probe_size = 2 * tp->mss_cache;
161291cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
16135d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
16145d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
16155d424d5aSJohn Heffner 		return -1;
16165d424d5aSJohn Heffner 	}
16175d424d5aSJohn Heffner 
16185d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
16197f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
16205d424d5aSJohn Heffner 		return -1;
16215d424d5aSJohn Heffner 
162291cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
16235d424d5aSJohn Heffner 		return -1;
162490840defSIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
16255d424d5aSJohn Heffner 		return 0;
16265d424d5aSJohn Heffner 
1627d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1628d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1629d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
16305d424d5aSJohn Heffner 			return -1;
16315d424d5aSJohn Heffner 		else
16325d424d5aSJohn Heffner 			return 0;
16335d424d5aSJohn Heffner 	}
16345d424d5aSJohn Heffner 
16355d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
16365d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
16375d424d5aSJohn Heffner 		return -1;
16383ab224beSHideo Aoki 	sk->sk_wmem_queued += nskb->truesize;
16393ab224beSHideo Aoki 	sk_mem_charge(sk, nskb->truesize);
16405d424d5aSJohn Heffner 
1641fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
16425d424d5aSJohn Heffner 
16435d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
16445d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
16455d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
16465d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
16475d424d5aSJohn Heffner 	nskb->csum = 0;
164884fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
16495d424d5aSJohn Heffner 
165050c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
165150c4817eSIlpo Järvinen 
16525d424d5aSJohn Heffner 	len = 0;
1653234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
16545d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
16555d424d5aSJohn Heffner 		if (nskb->ip_summed)
16565d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
16575d424d5aSJohn Heffner 		else
16585d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1659056834d9SIlpo Järvinen 							    skb_put(nskb, copy),
1660056834d9SIlpo Järvinen 							    copy, nskb->csum);
16615d424d5aSJohn Heffner 
16625d424d5aSJohn Heffner 		if (skb->len <= copy) {
16635d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
16645d424d5aSJohn Heffner 			 * Throw it away. */
16655d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1666fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
16673ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
16685d424d5aSJohn Heffner 		} else {
16695d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
16705d424d5aSJohn Heffner 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
16715d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
16725d424d5aSJohn Heffner 				skb_pull(skb, copy);
167384fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
1674056834d9SIlpo Järvinen 					skb->csum = csum_partial(skb->data,
1675056834d9SIlpo Järvinen 								 skb->len, 0);
16765d424d5aSJohn Heffner 			} else {
16775d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
16785d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
16795d424d5aSJohn Heffner 			}
16805d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
16815d424d5aSJohn Heffner 		}
16825d424d5aSJohn Heffner 
16835d424d5aSJohn Heffner 		len += copy;
1684234b6860SIlpo Järvinen 
1685234b6860SIlpo Järvinen 		if (len >= probe_size)
1686234b6860SIlpo Järvinen 			break;
16875d424d5aSJohn Heffner 	}
16885d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
16895d424d5aSJohn Heffner 
16905d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
16915d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
16925d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
16935d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
16945d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
16955d424d5aSJohn Heffner 		 * effectively two packets. */
16965d424d5aSJohn Heffner 		tp->snd_cwnd--;
169766f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, nskb);
16985d424d5aSJohn Heffner 
16995d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
17000e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
17010e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
17025d424d5aSJohn Heffner 
17035d424d5aSJohn Heffner 		return 1;
17045d424d5aSJohn Heffner 	}
17055d424d5aSJohn Heffner 
17065d424d5aSJohn Heffner 	return -1;
17075d424d5aSJohn Heffner }
17085d424d5aSJohn Heffner 
17091da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
17101da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
17111da177e4SLinus Torvalds  * window for us.
17121da177e4SLinus Torvalds  *
1713f8269a49SIlpo Järvinen  * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1714f8269a49SIlpo Järvinen  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1715f8269a49SIlpo Järvinen  * account rare use of URG, this is not a big flaw.
1716f8269a49SIlpo Järvinen  *
17171da177e4SLinus Torvalds  * Returns 1, if no segments are in flight and we have queued segments, but
17181da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
17191da177e4SLinus Torvalds  */
1720d5dd9175SIlpo Järvinen static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1721d5dd9175SIlpo Järvinen 			  int push_one, gfp_t gfp)
17221da177e4SLinus Torvalds {
17231da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
172492df7b51SDavid S. Miller 	struct sk_buff *skb;
1725c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1726c1b4a7e6SDavid S. Miller 	int cwnd_quota;
17275d424d5aSJohn Heffner 	int result;
17281da177e4SLinus Torvalds 
1729c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
17305d424d5aSJohn Heffner 
1731d5dd9175SIlpo Järvinen 	if (!push_one) {
17325d424d5aSJohn Heffner 		/* Do MTU probing. */
1733d5dd9175SIlpo Järvinen 		result = tcp_mtu_probe(sk);
1734d5dd9175SIlpo Järvinen 		if (!result) {
17355d424d5aSJohn Heffner 			return 0;
17365d424d5aSJohn Heffner 		} else if (result > 0) {
17375d424d5aSJohn Heffner 			sent_pkts = 1;
17385d424d5aSJohn Heffner 		}
1739d5dd9175SIlpo Järvinen 	}
17405d424d5aSJohn Heffner 
1741fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1742c8ac3774SHerbert Xu 		unsigned int limit;
1743c8ac3774SHerbert Xu 
1744b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1745c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1746c1b4a7e6SDavid S. Miller 
1747b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1748b68e9f85SHerbert Xu 		if (!cwnd_quota)
1749b68e9f85SHerbert Xu 			break;
1750b68e9f85SHerbert Xu 
1751b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1752b68e9f85SHerbert Xu 			break;
1753b68e9f85SHerbert Xu 
1754c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1755aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1756aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1757aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1758aa93466bSDavid S. Miller 				break;
1759c1b4a7e6SDavid S. Miller 		} else {
1760d5dd9175SIlpo Järvinen 			if (!push_one && tcp_tso_should_defer(sk, skb))
1761aa93466bSDavid S. Miller 				break;
1762c1b4a7e6SDavid S. Miller 		}
1763aa93466bSDavid S. Miller 
1764c8ac3774SHerbert Xu 		limit = mss_now;
1765f8269a49SIlpo Järvinen 		if (tso_segs > 1 && !tcp_urg_mode(tp))
17660e3a4803SIlpo Järvinen 			limit = tcp_mss_split_point(sk, skb, mss_now,
17670e3a4803SIlpo Järvinen 						    cwnd_quota);
1768c8ac3774SHerbert Xu 
1769c8ac3774SHerbert Xu 		if (skb->len > limit &&
1770c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
17711da177e4SLinus Torvalds 			break;
17721da177e4SLinus Torvalds 
17731da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1774c1b4a7e6SDavid S. Miller 
1775d5dd9175SIlpo Järvinen 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
17761da177e4SLinus Torvalds 			break;
17771da177e4SLinus Torvalds 
17781da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
17791da177e4SLinus Torvalds 		 * This call will increment packets_out.
17801da177e4SLinus Torvalds 		 */
178166f5fe62SIlpo Järvinen 		tcp_event_new_data_sent(sk, skb);
17821da177e4SLinus Torvalds 
17831da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1784aa93466bSDavid S. Miller 		sent_pkts++;
1785d5dd9175SIlpo Järvinen 
1786d5dd9175SIlpo Järvinen 		if (push_one)
1787d5dd9175SIlpo Järvinen 			break;
17881da177e4SLinus Torvalds 	}
17891da177e4SLinus Torvalds 
1790aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
17919e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
17921da177e4SLinus Torvalds 		return 0;
17931da177e4SLinus Torvalds 	}
1794fe067e8aSDavid S. Miller 	return !tp->packets_out && tcp_send_head(sk);
17951da177e4SLinus Torvalds }
17961da177e4SLinus Torvalds 
1797a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1798a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
1799a762a980SDavid S. Miller  * The socket must be locked by the caller.
1800a762a980SDavid S. Miller  */
18019e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
18029e412ba7SIlpo Järvinen 			       int nonagle)
1803a762a980SDavid S. Miller {
1804726e07a8SIlpo Järvinen 	/* If we are closed, the bytes will have to remain here.
1805726e07a8SIlpo Järvinen 	 * In time closedown will finish, we empty the write queue and
1806726e07a8SIlpo Järvinen 	 * all will be happy.
1807726e07a8SIlpo Järvinen 	 */
1808726e07a8SIlpo Järvinen 	if (unlikely(sk->sk_state == TCP_CLOSE))
1809726e07a8SIlpo Järvinen 		return;
1810726e07a8SIlpo Järvinen 
1811d5dd9175SIlpo Järvinen 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
18129e412ba7SIlpo Järvinen 		tcp_check_probe_timer(sk);
1813a762a980SDavid S. Miller }
1814a762a980SDavid S. Miller 
1815c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
1816c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
1817c1b4a7e6SDavid S. Miller  */
1818c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1819c1b4a7e6SDavid S. Miller {
1820fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1821c1b4a7e6SDavid S. Miller 
1822c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1823c1b4a7e6SDavid S. Miller 
1824d5dd9175SIlpo Järvinen 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
1825c1b4a7e6SDavid S. Miller }
1826c1b4a7e6SDavid S. Miller 
18271da177e4SLinus Torvalds /* This function returns the amount that we can raise the
18281da177e4SLinus Torvalds  * usable window based on the following constraints
18291da177e4SLinus Torvalds  *
18301da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
18311da177e4SLinus Torvalds  * 2. We limit memory per socket
18321da177e4SLinus Torvalds  *
18331da177e4SLinus Torvalds  * RFC 1122:
18341da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
18351da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
18361da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
18371da177e4SLinus Torvalds  *
18381da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
18391da177e4SLinus Torvalds  * it at least MSS bytes.
18401da177e4SLinus Torvalds  *
18411da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
18421da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
18431da177e4SLinus Torvalds  *
18441da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
18451da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
18461da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
18471da177e4SLinus Torvalds  * window to always advance by a single byte.
18481da177e4SLinus Torvalds  *
18491da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
18501da177e4SLinus Torvalds  * then this will not be a problem.
18511da177e4SLinus Torvalds  *
18521da177e4SLinus Torvalds  * BSD seems to make the following compromise:
18531da177e4SLinus Torvalds  *
18541da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
18551da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
18561da177e4SLinus Torvalds  *	then set the window to 0.
18571da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
18581da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
18591da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
18601da177e4SLinus Torvalds  *
18611da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
18621da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
18631da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
18641da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
18651da177e4SLinus Torvalds  * because the pipeline is full.
18661da177e4SLinus Torvalds  *
18671da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
18681da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
18691da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
18701da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
18711da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
18721da177e4SLinus Torvalds  *
18731da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
18741da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
18751da177e4SLinus Torvalds  *
18761da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
18771da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
18781da177e4SLinus Torvalds  */
18791da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
18801da177e4SLinus Torvalds {
1881463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
18821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1883caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
18841da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
18851da177e4SLinus Torvalds 	 * of peer's MSS is better for the performance.  It's more correct
18861da177e4SLinus Torvalds 	 * but may be worse for the performance because of rcv_mss
18871da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
18881da177e4SLinus Torvalds 	 */
1889463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
18901da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
18911da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
18921da177e4SLinus Torvalds 	int window;
18931da177e4SLinus Torvalds 
18941da177e4SLinus Torvalds 	if (mss > full_space)
18951da177e4SLinus Torvalds 		mss = full_space;
18961da177e4SLinus Torvalds 
1897b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
1898463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
18991da177e4SLinus Torvalds 
19001da177e4SLinus Torvalds 		if (tcp_memory_pressure)
1901056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
1902056834d9SIlpo Järvinen 					       4U * tp->advmss);
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 		if (free_space < mss)
19051da177e4SLinus Torvalds 			return 0;
19061da177e4SLinus Torvalds 	}
19071da177e4SLinus Torvalds 
19081da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
19091da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
19101da177e4SLinus Torvalds 
19111da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
19121da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
19131da177e4SLinus Torvalds 	 */
19141da177e4SLinus Torvalds 	window = tp->rcv_wnd;
19151da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
19161da177e4SLinus Torvalds 		window = free_space;
19171da177e4SLinus Torvalds 
19181da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
19191da177e4SLinus Torvalds 		 * Import case: prevent zero window announcement if
19201da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
19211da177e4SLinus Torvalds 		 */
19221da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
19231da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
19241da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
19251da177e4SLinus Torvalds 	} else {
19261da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
19271da177e4SLinus Torvalds 		 * Window clamp already applied above.
19281da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
19291da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
19301da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
19311da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
19321da177e4SLinus Torvalds 		 * is too small.
19331da177e4SLinus Torvalds 		 */
19341da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
19351da177e4SLinus Torvalds 			window = (free_space / mss) * mss;
193684565070SJohn Heffner 		else if (mss == full_space &&
1937b92edbe0SEric Dumazet 			 free_space > window + (full_space >> 1))
193884565070SJohn Heffner 			window = free_space;
19391da177e4SLinus Torvalds 	}
19401da177e4SLinus Torvalds 
19411da177e4SLinus Torvalds 	return window;
19421da177e4SLinus Torvalds }
19431da177e4SLinus Torvalds 
19444a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */
19454a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
19461da177e4SLinus Torvalds {
19471da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1948fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
1949058dc334SIlpo Järvinen 	int skb_size, next_skb_size;
19501da177e4SLinus Torvalds 
1951058dc334SIlpo Järvinen 	skb_size = skb->len;
1952058dc334SIlpo Järvinen 	next_skb_size = next_skb->len;
19531da177e4SLinus Torvalds 
1954058dc334SIlpo Järvinen 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
19551da177e4SLinus Torvalds 
19566859d494SIlpo Järvinen 	tcp_highest_sack_combine(sk, next_skb, skb);
1957a6963a6bSIlpo Järvinen 
1958fe067e8aSDavid S. Miller 	tcp_unlink_write_queue(next_skb, sk);
19591da177e4SLinus Torvalds 
1960058dc334SIlpo Järvinen 	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
19611a4e2d09SArnaldo Carvalho de Melo 				  next_skb_size);
19621da177e4SLinus Torvalds 
196352d570aaSJarek Poplawski 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
196452d570aaSJarek Poplawski 		skb->ip_summed = CHECKSUM_PARTIAL;
19651da177e4SLinus Torvalds 
196684fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
19671da177e4SLinus Torvalds 		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
19681da177e4SLinus Torvalds 
19691da177e4SLinus Torvalds 	/* Update sequence range on original skb. */
19701da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
19711da177e4SLinus Torvalds 
1972e6c7d085SIlpo Järvinen 	/* Merge over control information. This moves PSH/FIN etc. over */
1973e6c7d085SIlpo Järvinen 	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
19741da177e4SLinus Torvalds 
19751da177e4SLinus Torvalds 	/* All done, get rid of second SKB and account for it so
19761da177e4SLinus Torvalds 	 * packet counting does not break.
19771da177e4SLinus Torvalds 	 */
19784828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
1979b7689205SIlpo Järvinen 
1980b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
1981ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
1982ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
1983ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
1984b7689205SIlpo Järvinen 
1985797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
1986797108d1SIlpo Järvinen 
19873ab224beSHideo Aoki 	sk_wmem_free_skb(sk, next_skb);
19881da177e4SLinus Torvalds }
19891da177e4SLinus Torvalds 
199067edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
19914a17fc3aSIlpo Järvinen static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
19924a17fc3aSIlpo Järvinen {
19934a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
19944a17fc3aSIlpo Järvinen 		return 0;
19954a17fc3aSIlpo Järvinen 	/* TODO: SACK collapsing could be used to remove this condition */
19964a17fc3aSIlpo Järvinen 	if (skb_shinfo(skb)->nr_frags != 0)
19974a17fc3aSIlpo Järvinen 		return 0;
19984a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
19994a17fc3aSIlpo Järvinen 		return 0;
20004a17fc3aSIlpo Järvinen 	if (skb == tcp_send_head(sk))
20014a17fc3aSIlpo Järvinen 		return 0;
20024a17fc3aSIlpo Järvinen 	/* Some heurestics for collapsing over SACK'd could be invented */
20034a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
20044a17fc3aSIlpo Järvinen 		return 0;
20054a17fc3aSIlpo Järvinen 
20064a17fc3aSIlpo Järvinen 	return 1;
20074a17fc3aSIlpo Järvinen }
20084a17fc3aSIlpo Järvinen 
200967edfef7SAndi Kleen /* Collapse packets in the retransmit queue to make to create
201067edfef7SAndi Kleen  * less packets on the wire. This is only done on retransmission.
201167edfef7SAndi Kleen  */
20124a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
20134a17fc3aSIlpo Järvinen 				     int space)
20144a17fc3aSIlpo Järvinen {
20154a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
20164a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
20174a17fc3aSIlpo Järvinen 	int first = 1;
20184a17fc3aSIlpo Järvinen 
20194a17fc3aSIlpo Järvinen 	if (!sysctl_tcp_retrans_collapse)
20204a17fc3aSIlpo Järvinen 		return;
20214a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
20224a17fc3aSIlpo Järvinen 		return;
20234a17fc3aSIlpo Järvinen 
20244a17fc3aSIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
20254a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
20264a17fc3aSIlpo Järvinen 			break;
20274a17fc3aSIlpo Järvinen 
20284a17fc3aSIlpo Järvinen 		space -= skb->len;
20294a17fc3aSIlpo Järvinen 
20304a17fc3aSIlpo Järvinen 		if (first) {
20314a17fc3aSIlpo Järvinen 			first = 0;
20324a17fc3aSIlpo Järvinen 			continue;
20334a17fc3aSIlpo Järvinen 		}
20344a17fc3aSIlpo Järvinen 
20354a17fc3aSIlpo Järvinen 		if (space < 0)
20364a17fc3aSIlpo Järvinen 			break;
20374a17fc3aSIlpo Järvinen 		/* Punt if not enough space exists in the first SKB for
20384a17fc3aSIlpo Järvinen 		 * the data in the second
20394a17fc3aSIlpo Järvinen 		 */
20404a17fc3aSIlpo Järvinen 		if (skb->len > skb_tailroom(to))
20414a17fc3aSIlpo Järvinen 			break;
20424a17fc3aSIlpo Järvinen 
20434a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
20444a17fc3aSIlpo Järvinen 			break;
20454a17fc3aSIlpo Järvinen 
20464a17fc3aSIlpo Järvinen 		tcp_collapse_retrans(sk, to);
20474a17fc3aSIlpo Järvinen 	}
20484a17fc3aSIlpo Järvinen }
20494a17fc3aSIlpo Järvinen 
20501da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
20511da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
20521da177e4SLinus Torvalds  * error occurred which prevented the send.
20531da177e4SLinus Torvalds  */
20541da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
20551da177e4SLinus Torvalds {
20561da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20575d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
20587d227cd2SSridhar Samudrala 	unsigned int cur_mss;
20591da177e4SLinus Torvalds 	int err;
20601da177e4SLinus Torvalds 
20615d424d5aSJohn Heffner 	/* Inconslusive MTU probe */
20625d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
20635d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
20645d424d5aSJohn Heffner 	}
20655d424d5aSJohn Heffner 
20661da177e4SLinus Torvalds 	/* Do not sent more than we queued. 1/4 is reserved for possible
2067caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
20681da177e4SLinus Torvalds 	 */
20691da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
20701da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
20711da177e4SLinus Torvalds 		return -EAGAIN;
20721da177e4SLinus Torvalds 
20731da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
20741da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
20751da177e4SLinus Torvalds 			BUG();
20761da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
20771da177e4SLinus Torvalds 			return -ENOMEM;
20781da177e4SLinus Torvalds 	}
20791da177e4SLinus Torvalds 
20807d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
20817d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
20827d227cd2SSridhar Samudrala 
20830c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
20847d227cd2SSridhar Samudrala 
20851da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
20861da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
20871da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
20881da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
20891da177e4SLinus Torvalds 	 */
20909d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
20919d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
20921da177e4SLinus Torvalds 		return -EAGAIN;
20931da177e4SLinus Torvalds 
20941da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
2095846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
20961da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
209702276f3cSIlpo Järvinen 	} else {
20989eb9362eSIlpo Järvinen 		int oldpcount = tcp_skb_pcount(skb);
20999eb9362eSIlpo Järvinen 
21009eb9362eSIlpo Järvinen 		if (unlikely(oldpcount > 1)) {
210102276f3cSIlpo Järvinen 			tcp_init_tso_segs(sk, skb, cur_mss);
21029eb9362eSIlpo Järvinen 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
21039eb9362eSIlpo Järvinen 		}
21041da177e4SLinus Torvalds 	}
21051da177e4SLinus Torvalds 
21061da177e4SLinus Torvalds 	tcp_retrans_try_collapse(sk, skb, cur_mss);
21071da177e4SLinus Torvalds 
21081da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
21091da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
21101da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
21111da177e4SLinus Torvalds 	 */
21121da177e4SLinus Torvalds 	if (skb->len > 0 &&
21131da177e4SLinus Torvalds 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
21141da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
21151da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
2116e870a8efSIlpo Järvinen 			/* Reuse, even though it does some unnecessary work */
2117e870a8efSIlpo Järvinen 			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2118e870a8efSIlpo Järvinen 					     TCP_SKB_CB(skb)->flags);
21191da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
21201da177e4SLinus Torvalds 		}
21211da177e4SLinus Torvalds 	}
21221da177e4SLinus Torvalds 
21231da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
21241da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
21251da177e4SLinus Torvalds 	 */
21261da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
21271da177e4SLinus Torvalds 
2128dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
21291da177e4SLinus Torvalds 
21301da177e4SLinus Torvalds 	if (err == 0) {
21311da177e4SLinus Torvalds 		/* Update global TCP statistics. */
213281cc8a75SPavel Emelyanov 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
21331da177e4SLinus Torvalds 
21341da177e4SLinus Torvalds 		tp->total_retrans++;
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
21371da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
21381da177e4SLinus Torvalds 			if (net_ratelimit())
21391da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
21401da177e4SLinus Torvalds 		}
21411da177e4SLinus Torvalds #endif
2142b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
2143b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
21441da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
21451da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
21461da177e4SLinus Torvalds 
21471da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
21481da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
21491da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
21501da177e4SLinus Torvalds 
21511da177e4SLinus Torvalds 		tp->undo_retrans++;
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
21541da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
21551da177e4SLinus Torvalds 		 */
21561da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
21571da177e4SLinus Torvalds 	}
21581da177e4SLinus Torvalds 	return err;
21591da177e4SLinus Torvalds }
21601da177e4SLinus Torvalds 
216167edfef7SAndi Kleen /* Check if we forward retransmits are possible in the current
216267edfef7SAndi Kleen  * window/congestion state.
216367edfef7SAndi Kleen  */
2164b5afe7bcSIlpo Järvinen static int tcp_can_forward_retransmit(struct sock *sk)
2165b5afe7bcSIlpo Järvinen {
2166b5afe7bcSIlpo Järvinen 	const struct inet_connection_sock *icsk = inet_csk(sk);
2167b5afe7bcSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
2168b5afe7bcSIlpo Järvinen 
2169b5afe7bcSIlpo Järvinen 	/* Forward retransmissions are possible only during Recovery. */
2170b5afe7bcSIlpo Järvinen 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2171b5afe7bcSIlpo Järvinen 		return 0;
2172b5afe7bcSIlpo Järvinen 
2173b5afe7bcSIlpo Järvinen 	/* No forward retransmissions in Reno are possible. */
2174b5afe7bcSIlpo Järvinen 	if (tcp_is_reno(tp))
2175b5afe7bcSIlpo Järvinen 		return 0;
2176b5afe7bcSIlpo Järvinen 
2177b5afe7bcSIlpo Järvinen 	/* Yeah, we have to make difficult choice between forward transmission
2178b5afe7bcSIlpo Järvinen 	 * and retransmission... Both ways have their merits...
2179b5afe7bcSIlpo Järvinen 	 *
2180b5afe7bcSIlpo Järvinen 	 * For now we do not retransmit anything, while we have some new
2181b5afe7bcSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
2182b5afe7bcSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
2183b5afe7bcSIlpo Järvinen 	 */
2184b5afe7bcSIlpo Järvinen 
2185b5afe7bcSIlpo Järvinen 	if (tcp_may_send_now(sk))
2186b5afe7bcSIlpo Järvinen 		return 0;
2187b5afe7bcSIlpo Järvinen 
2188b5afe7bcSIlpo Järvinen 	return 1;
2189b5afe7bcSIlpo Järvinen }
2190b5afe7bcSIlpo Järvinen 
21911da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
21921da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
21931da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
21941da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
21951da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
21961da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
21971da177e4SLinus Torvalds  * If so, we use it to avoid unnecessarily retransmissions.
21981da177e4SLinus Torvalds  */
21991da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
22001da177e4SLinus Torvalds {
22016687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
22021da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22031da177e4SLinus Torvalds 	struct sk_buff *skb;
22040e1c54c2SIlpo Järvinen 	struct sk_buff *hole = NULL;
2205618d9f25SIlpo Järvinen 	u32 last_lost;
220661eb55f4SIlpo Järvinen 	int mib_idx;
22070e1c54c2SIlpo Järvinen 	int fwd_rexmitting = 0;
22086a438bbeSStephen Hemminger 
220908ebd172SIlpo Järvinen 	if (!tp->lost_out)
221008ebd172SIlpo Järvinen 		tp->retransmit_high = tp->snd_una;
221108ebd172SIlpo Järvinen 
2212618d9f25SIlpo Järvinen 	if (tp->retransmit_skb_hint) {
22136a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
2214618d9f25SIlpo Järvinen 		last_lost = TCP_SKB_CB(skb)->end_seq;
2215618d9f25SIlpo Järvinen 		if (after(last_lost, tp->retransmit_high))
2216618d9f25SIlpo Järvinen 			last_lost = tp->retransmit_high;
2217618d9f25SIlpo Järvinen 	} else {
2218fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
2219618d9f25SIlpo Järvinen 		last_lost = tp->snd_una;
2220618d9f25SIlpo Järvinen 	}
22211da177e4SLinus Torvalds 
2222fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
22231da177e4SLinus Torvalds 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
22241da177e4SLinus Torvalds 
2225fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2226fe067e8aSDavid S. Miller 			break;
22276a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
22280e1c54c2SIlpo Järvinen 		if (hole == NULL)
22296a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
22306a438bbeSStephen Hemminger 
22311da177e4SLinus Torvalds 		/* Assume this retransmit will generate
22321da177e4SLinus Torvalds 		 * only one packet for congestion window
22331da177e4SLinus Torvalds 		 * calculation purposes.  This works because
22341da177e4SLinus Torvalds 		 * tcp_retransmit_skb() will chop up the
22351da177e4SLinus Torvalds 		 * packet to be MSS sized and all the
22361da177e4SLinus Torvalds 		 * packet counting works out.
22371da177e4SLinus Torvalds 		 */
22381da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
22391da177e4SLinus Torvalds 			return;
22400e1c54c2SIlpo Järvinen 
22410e1c54c2SIlpo Järvinen 		if (fwd_rexmitting) {
22420e1c54c2SIlpo Järvinen begin_fwd:
22430e1c54c2SIlpo Järvinen 			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2244006f582cSIlpo Järvinen 				break;
22450e1c54c2SIlpo Järvinen 			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
22460e1c54c2SIlpo Järvinen 
22470e1c54c2SIlpo Järvinen 		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2248618d9f25SIlpo Järvinen 			tp->retransmit_high = last_lost;
22490e1c54c2SIlpo Järvinen 			if (!tcp_can_forward_retransmit(sk))
22500e1c54c2SIlpo Järvinen 				break;
22510e1c54c2SIlpo Järvinen 			/* Backtrack if necessary to non-L'ed skb */
22520e1c54c2SIlpo Järvinen 			if (hole != NULL) {
22530e1c54c2SIlpo Järvinen 				skb = hole;
22540e1c54c2SIlpo Järvinen 				hole = NULL;
22550e1c54c2SIlpo Järvinen 			}
22560e1c54c2SIlpo Järvinen 			fwd_rexmitting = 1;
22570e1c54c2SIlpo Järvinen 			goto begin_fwd;
22580e1c54c2SIlpo Järvinen 
22590e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
2260ac11ba75SIlpo Järvinen 			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
22610e1c54c2SIlpo Järvinen 				hole = skb;
226261eb55f4SIlpo Järvinen 			continue;
22631da177e4SLinus Torvalds 
22640e1c54c2SIlpo Järvinen 		} else {
2265618d9f25SIlpo Järvinen 			last_lost = TCP_SKB_CB(skb)->end_seq;
22660e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
22670e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
22680e1c54c2SIlpo Järvinen 			else
22690e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
22700e1c54c2SIlpo Järvinen 		}
22710e1c54c2SIlpo Järvinen 
22720e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
227361eb55f4SIlpo Järvinen 			continue;
227440b215e5SPavel Emelyanov 
2275f0ceb0edSIlpo Järvinen 		if (tcp_retransmit_skb(sk, skb))
22761da177e4SLinus Torvalds 			return;
2277de0744afSPavel Emelyanov 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
22781da177e4SLinus Torvalds 
2279fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
2280463c84b9SArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
22813f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
22823f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
22831da177e4SLinus Torvalds 	}
22841da177e4SLinus Torvalds }
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds /* Send a fin.  The caller locks the socket for us.  This cannot be
22871da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
22881da177e4SLinus Torvalds  */
22891da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
22901da177e4SLinus Torvalds {
22911da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2292fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
22931da177e4SLinus Torvalds 	int mss_now;
22941da177e4SLinus Torvalds 
22951da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
22961da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
22971da177e4SLinus Torvalds 	 * and IP options.
22981da177e4SLinus Torvalds 	 */
22990c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
23001da177e4SLinus Torvalds 
2301fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
23021da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
23031da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
23041da177e4SLinus Torvalds 		tp->write_seq++;
23051da177e4SLinus Torvalds 	} else {
23061da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
23071da177e4SLinus Torvalds 		for (;;) {
2308aa133076SWu Fengguang 			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2309aa133076SWu Fengguang 					       sk->sk_allocation);
23101da177e4SLinus Torvalds 			if (skb)
23111da177e4SLinus Torvalds 				break;
23121da177e4SLinus Torvalds 			yield();
23131da177e4SLinus Torvalds 		}
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
23161da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
23171da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2318e870a8efSIlpo Järvinen 		tcp_init_nondata_skb(skb, tp->write_seq,
2319e870a8efSIlpo Järvinen 				     TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
23201da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
23211da177e4SLinus Torvalds 	}
23229e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
23231da177e4SLinus Torvalds }
23241da177e4SLinus Torvalds 
23251da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
23261da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
23271da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
232865bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
23291da177e4SLinus Torvalds  */
2330dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
23311da177e4SLinus Torvalds {
23321da177e4SLinus Torvalds 	struct sk_buff *skb;
23331da177e4SLinus Torvalds 
23341da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
23351da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
23361da177e4SLinus Torvalds 	if (!skb) {
23374e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
23381da177e4SLinus Torvalds 		return;
23391da177e4SLinus Torvalds 	}
23401da177e4SLinus Torvalds 
23411da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
23421da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
2343e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2344e870a8efSIlpo Järvinen 			     TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
23451da177e4SLinus Torvalds 	/* Send it off. */
23461da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2347dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
23484e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
234926af65cbSSridhar Samudrala 
235081cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
23511da177e4SLinus Torvalds }
23521da177e4SLinus Torvalds 
235367edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment.
235467edfef7SAndi Kleen  * WARNING: This routine must only be called when we have already sent
23551da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
23561da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
23571da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
23581da177e4SLinus Torvalds  */
23591da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
23601da177e4SLinus Torvalds {
23611da177e4SLinus Torvalds 	struct sk_buff *skb;
23621da177e4SLinus Torvalds 
2363fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
23641da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
23651da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
23661da177e4SLinus Torvalds 		return -EFAULT;
23671da177e4SLinus Torvalds 	}
23681da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
23691da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
23701da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
23711da177e4SLinus Torvalds 			if (nskb == NULL)
23721da177e4SLinus Torvalds 				return -ENOMEM;
2373fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
23741da177e4SLinus Torvalds 			skb_header_release(nskb);
2375fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
23763ab224beSHideo Aoki 			sk_wmem_free_skb(sk, skb);
23773ab224beSHideo Aoki 			sk->sk_wmem_queued += nskb->truesize;
23783ab224beSHideo Aoki 			sk_mem_charge(sk, nskb->truesize);
23791da177e4SLinus Torvalds 			skb = nskb;
23801da177e4SLinus Torvalds 		}
23811da177e4SLinus Torvalds 
23821da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
23831da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
23841da177e4SLinus Torvalds 	}
23851da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2386dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
23871da177e4SLinus Torvalds }
23881da177e4SLinus Torvalds 
238967edfef7SAndi Kleen /* Prepare a SYN-ACK. */
23901da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2391e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
2392e6b4d113SWilliam Allen Simpson 				struct request_values *rvp)
23931da177e4SLinus Torvalds {
2394bd0388aeSWilliam Allen Simpson 	struct tcp_out_options opts;
23954957faadSWilliam Allen Simpson 	struct tcp_extend_values *xvp = tcp_xv(rvp);
23962e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
23971da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2398*28b2774aSEric Dumazet 	const struct tcp_cookie_values *cvp = tp->cookie_values;
23991da177e4SLinus Torvalds 	struct tcphdr *th;
24001da177e4SLinus Torvalds 	struct sk_buff *skb;
2401cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2402bd0388aeSWilliam Allen Simpson 	int tcp_header_size;
2403f5fff5dcSTom Quetchenbach 	int mss;
2404*28b2774aSEric Dumazet 	int s_data_desired = 0;
24051da177e4SLinus Torvalds 
2406*28b2774aSEric Dumazet 	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2407*28b2774aSEric Dumazet 		s_data_desired = cvp->s_data_desired;
2408*28b2774aSEric Dumazet 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
24091da177e4SLinus Torvalds 	if (skb == NULL)
24101da177e4SLinus Torvalds 		return NULL;
24111da177e4SLinus Torvalds 
24121da177e4SLinus Torvalds 	/* Reserve space for headers. */
24131da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
24141da177e4SLinus Torvalds 
2415adf30907SEric Dumazet 	skb_dst_set(skb, dst_clone(dst));
24161da177e4SLinus Torvalds 
2417f5fff5dcSTom Quetchenbach 	mss = dst_metric(dst, RTAX_ADVMSS);
2418f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2419f5fff5dcSTom Quetchenbach 		mss = tp->rx_opt.user_mss;
2420f5fff5dcSTom Quetchenbach 
242133ad798cSAdam Langley 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
242233ad798cSAdam Langley 		__u8 rcv_wscale;
242333ad798cSAdam Langley 		/* Set this up on the first call only */
242433ad798cSAdam Langley 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
242533ad798cSAdam Langley 		/* tcp_full_space because it is guaranteed to be the first packet */
242633ad798cSAdam Langley 		tcp_select_initial_window(tcp_full_space(sk),
2427f5fff5dcSTom Quetchenbach 			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
242833ad798cSAdam Langley 			&req->rcv_wnd,
242933ad798cSAdam Langley 			&req->window_clamp,
243033ad798cSAdam Langley 			ireq->wscale_ok,
243131d12926Slaurent chavey 			&rcv_wscale,
243231d12926Slaurent chavey 			dst_metric(dst, RTAX_INITRWND));
243333ad798cSAdam Langley 		ireq->rcv_wscale = rcv_wscale;
243433ad798cSAdam Langley 	}
2435cfb6eeb4SYOSHIFUJI Hideaki 
243633ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
24378b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
24388b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
24398b5f12d0SFlorian Westphal 		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
24408b5f12d0SFlorian Westphal 	else
24418b5f12d0SFlorian Westphal #endif
244233ad798cSAdam Langley 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2443f5fff5dcSTom Quetchenbach 	tcp_header_size = tcp_synack_options(sk, req, mss,
24444957faadSWilliam Allen Simpson 					     skb, &opts, &md5, xvp)
24454957faadSWilliam Allen Simpson 			+ sizeof(*th);
244633ad798cSAdam Langley 
2447aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2448aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
24491da177e4SLinus Torvalds 
2450aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
24511da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
24521da177e4SLinus Torvalds 	th->syn = 1;
24531da177e4SLinus Torvalds 	th->ack = 1;
24541da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
2455a3116ac5SKOVACS Krisztian 	th->source = ireq->loc_port;
24562e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
2457e870a8efSIlpo Järvinen 	/* Setting of flags are superfluous here for callers (and ECE is
2458e870a8efSIlpo Järvinen 	 * not even correctly set)
2459e870a8efSIlpo Järvinen 	 */
2460e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2461e870a8efSIlpo Järvinen 			     TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
24624957faadSWilliam Allen Simpson 
24634957faadSWilliam Allen Simpson 	if (OPTION_COOKIE_EXTENSION & opts.options) {
2464*28b2774aSEric Dumazet 		if (s_data_desired) {
2465*28b2774aSEric Dumazet 			u8 *buf = skb_put(skb, s_data_desired);
24664957faadSWilliam Allen Simpson 
24674957faadSWilliam Allen Simpson 			/* copy data directly from the listening socket. */
2468*28b2774aSEric Dumazet 			memcpy(buf, cvp->s_data_payload, s_data_desired);
2469*28b2774aSEric Dumazet 			TCP_SKB_CB(skb)->end_seq += s_data_desired;
24704957faadSWilliam Allen Simpson 		}
24714957faadSWilliam Allen Simpson 
24724957faadSWilliam Allen Simpson 		if (opts.hash_size > 0) {
24734957faadSWilliam Allen Simpson 			__u32 workspace[SHA_WORKSPACE_WORDS];
24744957faadSWilliam Allen Simpson 			u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
24754957faadSWilliam Allen Simpson 			u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
24764957faadSWilliam Allen Simpson 
24774957faadSWilliam Allen Simpson 			/* Secret recipe depends on the Timestamp, (future)
24784957faadSWilliam Allen Simpson 			 * Sequence and Acknowledgment Numbers, Initiator
24794957faadSWilliam Allen Simpson 			 * Cookie, and others handled by IP variant caller.
24804957faadSWilliam Allen Simpson 			 */
24814957faadSWilliam Allen Simpson 			*tail-- ^= opts.tsval;
24824957faadSWilliam Allen Simpson 			*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
24834957faadSWilliam Allen Simpson 			*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
24844957faadSWilliam Allen Simpson 
24854957faadSWilliam Allen Simpson 			/* recommended */
24864957faadSWilliam Allen Simpson 			*tail-- ^= ((th->dest << 16) | th->source);
2487f9a2e69eSDavid S. Miller 			*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
24884957faadSWilliam Allen Simpson 
24894957faadSWilliam Allen Simpson 			sha_transform((__u32 *)&xvp->cookie_bakery[0],
24904957faadSWilliam Allen Simpson 				      (char *)mess,
24914957faadSWilliam Allen Simpson 				      &workspace[0]);
24924957faadSWilliam Allen Simpson 			opts.hash_location =
24934957faadSWilliam Allen Simpson 				(__u8 *)&xvp->cookie_bakery[0];
24944957faadSWilliam Allen Simpson 		}
24954957faadSWilliam Allen Simpson 	}
24964957faadSWilliam Allen Simpson 
24971da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
24982e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
24991da177e4SLinus Torvalds 
25001da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2501600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
2502bd0388aeSWilliam Allen Simpson 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
25031da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
250481cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
2505cfb6eeb4SYOSHIFUJI Hideaki 
2506cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2507cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
2508cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2509bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
251049a72dfbSAdam Langley 					       md5, NULL, req, skb);
2511cfb6eeb4SYOSHIFUJI Hideaki 	}
2512cfb6eeb4SYOSHIFUJI Hideaki #endif
2513cfb6eeb4SYOSHIFUJI Hideaki 
25141da177e4SLinus Torvalds 	return skb;
25151da177e4SLinus Torvalds }
25161da177e4SLinus Torvalds 
251767edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */
251840efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk)
25191da177e4SLinus Torvalds {
25201da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
25211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25221da177e4SLinus Torvalds 	__u8 rcv_wscale;
25231da177e4SLinus Torvalds 
25241da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
25251da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
25261da177e4SLinus Torvalds 	 */
25271da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
2528bb5b7c11SDavid S. Miller 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
25291da177e4SLinus Torvalds 
2530cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2531cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2532cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2533cfb6eeb4SYOSHIFUJI Hideaki #endif
2534cfb6eeb4SYOSHIFUJI Hideaki 
25351da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
25361da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
25371da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
25381da177e4SLinus Torvalds 	tp->max_window = 0;
25395d424d5aSJohn Heffner 	tcp_mtup_init(sk);
25401da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
25411da177e4SLinus Torvalds 
25421da177e4SLinus Torvalds 	if (!tp->window_clamp)
25431da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
25441da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2545f5fff5dcSTom Quetchenbach 	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2546f5fff5dcSTom Quetchenbach 		tp->advmss = tp->rx_opt.user_mss;
2547f5fff5dcSTom Quetchenbach 
25481da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
25491da177e4SLinus Torvalds 
25501da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
25511da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
25521da177e4SLinus Torvalds 				  &tp->rcv_wnd,
25531da177e4SLinus Torvalds 				  &tp->window_clamp,
2554bb5b7c11SDavid S. Miller 				  sysctl_tcp_window_scaling,
255531d12926Slaurent chavey 				  &rcv_wscale,
255631d12926Slaurent chavey 				  dst_metric(dst, RTAX_INITRWND));
25571da177e4SLinus Torvalds 
25581da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
25591da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
25601da177e4SLinus Torvalds 
25611da177e4SLinus Torvalds 	sk->sk_err = 0;
25621da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
25631da177e4SLinus Torvalds 	tp->snd_wnd = 0;
2564ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
25651da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
25661da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
256733f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
25681da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
25691da177e4SLinus Torvalds 	tp->rcv_wup = 0;
25701da177e4SLinus Torvalds 	tp->copied_seq = 0;
25711da177e4SLinus Torvalds 
2572463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2573463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
25741da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
25751da177e4SLinus Torvalds }
25761da177e4SLinus Torvalds 
257767edfef7SAndi Kleen /* Build a SYN and send it off. */
25781da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
25791da177e4SLinus Torvalds {
25801da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25811da177e4SLinus Torvalds 	struct sk_buff *buff;
25821da177e4SLinus Torvalds 
25831da177e4SLinus Torvalds 	tcp_connect_init(sk);
25841da177e4SLinus Torvalds 
2585d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
25861da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
25871da177e4SLinus Torvalds 		return -ENOBUFS;
25881da177e4SLinus Torvalds 
25891da177e4SLinus Torvalds 	/* Reserve space for headers. */
25901da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
25911da177e4SLinus Torvalds 
2592bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2593e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
2594e870a8efSIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
25951da177e4SLinus Torvalds 
25961da177e4SLinus Torvalds 	/* Send it off. */
25971da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
25981da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
25991da177e4SLinus Torvalds 	skb_header_release(buff);
2600fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, buff);
26013ab224beSHideo Aoki 	sk->sk_wmem_queued += buff->truesize;
26023ab224beSHideo Aoki 	sk_mem_charge(sk, buff->truesize);
26031da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
2604aa133076SWu Fengguang 	tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2605bd37a088SWei Yongjun 
2606bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2607bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
2608bd37a088SWei Yongjun 	 */
2609bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2610bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
261181cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
26121da177e4SLinus Torvalds 
26131da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
26143f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
26153f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
26161da177e4SLinus Torvalds 	return 0;
26171da177e4SLinus Torvalds }
26181da177e4SLinus Torvalds 
26191da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
26201da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
26211da177e4SLinus Torvalds  * for details.
26221da177e4SLinus Torvalds  */
26231da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
26241da177e4SLinus Torvalds {
2625463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2626463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
26271da177e4SLinus Torvalds 	unsigned long timeout;
26281da177e4SLinus Torvalds 
26291da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2630463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
26311da177e4SLinus Torvalds 		int max_ato = HZ / 2;
26321da177e4SLinus Torvalds 
2633056834d9SIlpo Järvinen 		if (icsk->icsk_ack.pingpong ||
2634056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
26351da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
26361da177e4SLinus Torvalds 
26371da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
26381da177e4SLinus Torvalds 
26391da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
2640463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
26411da177e4SLinus Torvalds 		 * directly.
26421da177e4SLinus Torvalds 		 */
26431da177e4SLinus Torvalds 		if (tp->srtt) {
26441da177e4SLinus Torvalds 			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
26451da177e4SLinus Torvalds 
26461da177e4SLinus Torvalds 			if (rtt < max_ato)
26471da177e4SLinus Torvalds 				max_ato = rtt;
26481da177e4SLinus Torvalds 		}
26491da177e4SLinus Torvalds 
26501da177e4SLinus Torvalds 		ato = min(ato, max_ato);
26511da177e4SLinus Torvalds 	}
26521da177e4SLinus Torvalds 
26531da177e4SLinus Torvalds 	/* Stay within the limit we were given */
26541da177e4SLinus Torvalds 	timeout = jiffies + ato;
26551da177e4SLinus Torvalds 
26561da177e4SLinus Torvalds 	/* Use new timeout only if there wasn't a older one earlier. */
2657463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
26581da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
26591da177e4SLinus Torvalds 		 * send ACK now.
26601da177e4SLinus Torvalds 		 */
2661463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
2662463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
26631da177e4SLinus Torvalds 			tcp_send_ack(sk);
26641da177e4SLinus Torvalds 			return;
26651da177e4SLinus Torvalds 		}
26661da177e4SLinus Torvalds 
2667463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2668463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
26691da177e4SLinus Torvalds 	}
2670463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2671463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
2672463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
26731da177e4SLinus Torvalds }
26741da177e4SLinus Torvalds 
26751da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
26761da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
26771da177e4SLinus Torvalds {
26781da177e4SLinus Torvalds 	struct sk_buff *buff;
26791da177e4SLinus Torvalds 
2680058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
2681058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
2682058dc334SIlpo Järvinen 		return;
2683058dc334SIlpo Järvinen 
26841da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
26851da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
26861da177e4SLinus Torvalds 	 * sock.
26871da177e4SLinus Torvalds 	 */
26881da177e4SLinus Torvalds 	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
26891da177e4SLinus Torvalds 	if (buff == NULL) {
2690463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
2691463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
26923f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
26933f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
26941da177e4SLinus Torvalds 		return;
26951da177e4SLinus Torvalds 	}
26961da177e4SLinus Torvalds 
26971da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
26981da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
2699e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
27001da177e4SLinus Torvalds 
27011da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
27021da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2703dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
27041da177e4SLinus Torvalds }
27051da177e4SLinus Torvalds 
27061da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
27071da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
27081da177e4SLinus Torvalds  *
27091da177e4SLinus Torvalds  * Question: what should we make while urgent mode?
27101da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
27111da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
27121da177e4SLinus Torvalds  *
27131da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
27141da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
27151da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
27161da177e4SLinus Torvalds  */
27171da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
27181da177e4SLinus Torvalds {
27191da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27201da177e4SLinus Torvalds 	struct sk_buff *skb;
27211da177e4SLinus Torvalds 
27221da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
27231da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
27241da177e4SLinus Torvalds 	if (skb == NULL)
27251da177e4SLinus Torvalds 		return -1;
27261da177e4SLinus Torvalds 
27271da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
27281da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
27291da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
27301da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
27311da177e4SLinus Torvalds 	 * send it.
27321da177e4SLinus Torvalds 	 */
2733e870a8efSIlpo Järvinen 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
27341da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2735dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
27361da177e4SLinus Torvalds }
27371da177e4SLinus Torvalds 
273867edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
27391da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
27401da177e4SLinus Torvalds {
27411da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27421da177e4SLinus Torvalds 	struct sk_buff *skb;
27431da177e4SLinus Torvalds 
2744058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
2745058dc334SIlpo Järvinen 		return -1;
2746058dc334SIlpo Järvinen 
2747fe067e8aSDavid S. Miller 	if ((skb = tcp_send_head(sk)) != NULL &&
274890840defSIlpo Järvinen 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
27491da177e4SLinus Torvalds 		int err;
27500c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
275190840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
27521da177e4SLinus Torvalds 
27531da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
27541da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
27551da177e4SLinus Torvalds 
27561da177e4SLinus Torvalds 		/* We are probing the opening of a window
27571da177e4SLinus Torvalds 		 * but the window size is != 0
27581da177e4SLinus Torvalds 		 * must have been a result SWS avoidance ( sender )
27591da177e4SLinus Torvalds 		 */
27601da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
27611da177e4SLinus Torvalds 		    skb->len > mss) {
27621da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
27631da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2764846998aeSDavid S. Miller 			if (tcp_fragment(sk, skb, seg_size, mss))
27651da177e4SLinus Torvalds 				return -1;
27661da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
2767846998aeSDavid S. Miller 			tcp_set_skb_tso_segs(sk, skb, mss);
27681da177e4SLinus Torvalds 
27691da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
27701da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
2771dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
277266f5fe62SIlpo Järvinen 		if (!err)
277366f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
27741da177e4SLinus Torvalds 		return err;
27751da177e4SLinus Torvalds 	} else {
277633f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
27774828e7f4SIlpo Järvinen 			tcp_xmit_probe_skb(sk, 1);
27781da177e4SLinus Torvalds 		return tcp_xmit_probe_skb(sk, 0);
27791da177e4SLinus Torvalds 	}
27801da177e4SLinus Torvalds }
27811da177e4SLinus Torvalds 
27821da177e4SLinus Torvalds /* A window probe timeout has occurred.  If window is not closed send
27831da177e4SLinus Torvalds  * a partial packet else a zero probe.
27841da177e4SLinus Torvalds  */
27851da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
27861da177e4SLinus Torvalds {
2787463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
27881da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
27891da177e4SLinus Torvalds 	int err;
27901da177e4SLinus Torvalds 
27911da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
27921da177e4SLinus Torvalds 
2793fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
27941da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
27956687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2796463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
27971da177e4SLinus Torvalds 		return;
27981da177e4SLinus Torvalds 	}
27991da177e4SLinus Torvalds 
28001da177e4SLinus Torvalds 	if (err <= 0) {
2801463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2802463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
28036687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2804463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
28053f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
28063f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
28071da177e4SLinus Torvalds 	} else {
28081da177e4SLinus Torvalds 		/* If packet was not sent due to local congestion,
28096687e988SArnaldo Carvalho de Melo 		 * do not backoff and do not remember icsk_probes_out.
28101da177e4SLinus Torvalds 		 * Let local senders to fight for local resources.
28111da177e4SLinus Torvalds 		 *
28121da177e4SLinus Torvalds 		 * Use accumulated backoff yet.
28131da177e4SLinus Torvalds 		 */
28146687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
28156687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2816463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2817463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
28183f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
28193f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
28201da177e4SLinus Torvalds 	}
28211da177e4SLinus Torvalds }
28221da177e4SLinus Torvalds 
2823c6aefafbSGlenn Griffin EXPORT_SYMBOL(tcp_select_initial_window);
28241da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
28251da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
28261da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
28271da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
28285d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
2829