xref: /linux/net/ipv4/tcp_output.c (revision bd515c3e48ececd774eb3128e81b669dbbd32637)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  */
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds /*
241da177e4SLinus Torvalds  * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
251da177e4SLinus Torvalds  *				:	Fragmentation on mtu decrease
261da177e4SLinus Torvalds  *				:	Segment collapse on retransmit
271da177e4SLinus Torvalds  *				:	AF independence
281da177e4SLinus Torvalds  *
291da177e4SLinus Torvalds  *		Linus Torvalds	:	send_delayed_ack
301da177e4SLinus Torvalds  *		David S. Miller	:	Charge memory using the right skb
311da177e4SLinus Torvalds  *					during syn/ack processing.
321da177e4SLinus Torvalds  *		David S. Miller :	Output engine completely rewritten.
331da177e4SLinus Torvalds  *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
341da177e4SLinus Torvalds  *		Cacophonix Gaul :	draft-minshall-nagle-01
351da177e4SLinus Torvalds  *		J Hadi Salim	:	ECN support
361da177e4SLinus Torvalds  *
371da177e4SLinus Torvalds  */
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds #include <net/tcp.h>
401da177e4SLinus Torvalds 
411da177e4SLinus Torvalds #include <linux/compiler.h>
421da177e4SLinus Torvalds #include <linux/module.h>
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds /* People can turn this off for buggy TCPs found in printers etc. */
45ab32ea5dSBrian Haley int sysctl_tcp_retrans_collapse __read_mostly = 1;
461da177e4SLinus Torvalds 
4715d99e02SRick Jones /* People can turn this on to work with those rare, broken TCPs that
4815d99e02SRick Jones  * interpret the window field as a signed quantity.
4915d99e02SRick Jones  */
50ab32ea5dSBrian Haley int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
5115d99e02SRick Jones 
521da177e4SLinus Torvalds /* This limits the percentage of the congestion window which we
531da177e4SLinus Torvalds  * will allow a single TSO frame to consume.  Building TSO frames
541da177e4SLinus Torvalds  * which are too large can cause TCP streams to be bursty.
551da177e4SLinus Torvalds  */
56ab32ea5dSBrian Haley int sysctl_tcp_tso_win_divisor __read_mostly = 3;
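
/* Worked example (illustrative; the numbers are assumed, not from this file):
 * with the default divisor of 3 and a congestion window of 30 segments, a
 * single TSO frame is limited to roughly 30 / 3 == 10 segments, so one
 * burst never consumes more than about a third of the window.
 */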
571da177e4SLinus Torvalds 
58ab32ea5dSBrian Haley int sysctl_tcp_mtu_probing __read_mostly = 0;
59ab32ea5dSBrian Haley int sysctl_tcp_base_mss __read_mostly = 512;
605d424d5aSJohn Heffner 
6135089bb2SDavid S. Miller /* By default, RFC2861 behavior.  */
62ab32ea5dSBrian Haley int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
6335089bb2SDavid S. Miller 
646ff03ac3SIlpo Järvinen static inline void tcp_packets_out_inc(struct sock *sk,
656ff03ac3SIlpo Järvinen 				       const struct sk_buff *skb)
666ff03ac3SIlpo Järvinen {
676ff03ac3SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
686ff03ac3SIlpo Järvinen 	int orig = tp->packets_out;
696ff03ac3SIlpo Järvinen 
706ff03ac3SIlpo Järvinen 	tp->packets_out += tcp_skb_pcount(skb);
716ff03ac3SIlpo Järvinen 	if (!orig)
726ff03ac3SIlpo Järvinen 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
736ff03ac3SIlpo Järvinen 					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
746ff03ac3SIlpo Järvinen }
756ff03ac3SIlpo Järvinen 
769e412ba7SIlpo Järvinen static void update_send_head(struct sock *sk, struct sk_buff *skb)
771da177e4SLinus Torvalds {
789e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
799e412ba7SIlpo Järvinen 
80fe067e8aSDavid S. Miller 	tcp_advance_send_head(sk, skb);
811da177e4SLinus Torvalds 	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
829e412ba7SIlpo Järvinen 	tcp_packets_out_inc(sk, skb);
838512430eSIlpo Järvinen 
848512430eSIlpo Järvinen 	/* Don't override Nagle indefinitely with F-RTO */
858512430eSIlpo Järvinen 	if (tp->frto_counter == 2)
868512430eSIlpo Järvinen 		tp->frto_counter = 3;
871da177e4SLinus Torvalds }
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds /* SND.NXT, if window was not shrunk.
901da177e4SLinus Torvalds  * If window has been shrunk, what should we make? It is not clear at all.
911da177e4SLinus Torvalds  * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
921da177e4SLinus Torvalds  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
931da177e4SLinus Torvalds  * invalid. OK, let's make this for now:
941da177e4SLinus Torvalds  */
959e412ba7SIlpo Järvinen static inline __u32 tcp_acceptable_seq(struct sock *sk)
961da177e4SLinus Torvalds {
979e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
989e412ba7SIlpo Järvinen 
991da177e4SLinus Torvalds 	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
1001da177e4SLinus Torvalds 		return tp->snd_nxt;
1011da177e4SLinus Torvalds 	else
1021da177e4SLinus Torvalds 		return tp->snd_una+tp->snd_wnd;
1031da177e4SLinus Torvalds }
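
/* Worked example (illustrative; values are assumed): with snd_una == 1000,
 * snd_wnd == 500 and snd_nxt == 1400 the right edge (1500) has not moved
 * below SND.NXT, so 1400 is returned.  If the peer shrank the window so
 * that snd_nxt == 1600 lies beyond the edge, 1500 is returned instead.
 */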
1041da177e4SLinus Torvalds 
1051da177e4SLinus Torvalds /* Calculate mss to advertise in SYN segment.
1061da177e4SLinus Torvalds  * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
1071da177e4SLinus Torvalds  *
1081da177e4SLinus Torvalds  * 1. It is independent of path mtu.
1091da177e4SLinus Torvalds  * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
1101da177e4SLinus Torvalds  * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
1111da177e4SLinus Torvalds  *    attached devices, because some buggy hosts are confused by
1121da177e4SLinus Torvalds  *    large MSS.
1131da177e4SLinus Torvalds  * 4. We do not do 3; we advertise an MSS calculated from the first
1141da177e4SLinus Torvalds  *    hop device MTU, but allow it to be raised to ip_rt_min_advmss.
1151da177e4SLinus Torvalds  *    This may be overridden via information stored in the routing table.
1161da177e4SLinus Torvalds  * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
1171da177e4SLinus Torvalds  *    probably even Jumbo".
1181da177e4SLinus Torvalds  */
1191da177e4SLinus Torvalds static __u16 tcp_advertise_mss(struct sock *sk)
1201da177e4SLinus Torvalds {
1211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1221da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
1231da177e4SLinus Torvalds 	int mss = tp->advmss;
1241da177e4SLinus Torvalds 
1251da177e4SLinus Torvalds 	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
1261da177e4SLinus Torvalds 		mss = dst_metric(dst, RTAX_ADVMSS);
1271da177e4SLinus Torvalds 		tp->advmss = mss;
1281da177e4SLinus Torvalds 	}
1291da177e4SLinus Torvalds 
1301da177e4SLinus Torvalds 	return (__u16)mss;
1311da177e4SLinus Torvalds }
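
/* Example (illustrative; values are assumed): if the cached route carries
 * RTAX_ADVMSS == 536 while tp->advmss is 1460, the advertised MSS is
 * lowered to 536 and written back into tp->advmss for later use.
 */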
1321da177e4SLinus Torvalds 
1331da177e4SLinus Torvalds /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
1341da177e4SLinus Torvalds  * This is the first part of the cwnd validation mechanism. */
135463c84b9SArnaldo Carvalho de Melo static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
1361da177e4SLinus Torvalds {
137463c84b9SArnaldo Carvalho de Melo 	struct tcp_sock *tp = tcp_sk(sk);
1381da177e4SLinus Torvalds 	s32 delta = tcp_time_stamp - tp->lsndtime;
1391da177e4SLinus Torvalds 	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
1401da177e4SLinus Torvalds 	u32 cwnd = tp->snd_cwnd;
1411da177e4SLinus Torvalds 
1426687e988SArnaldo Carvalho de Melo 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
1431da177e4SLinus Torvalds 
1446687e988SArnaldo Carvalho de Melo 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
1451da177e4SLinus Torvalds 	restart_cwnd = min(restart_cwnd, cwnd);
1461da177e4SLinus Torvalds 
147463c84b9SArnaldo Carvalho de Melo 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
1481da177e4SLinus Torvalds 		cwnd >>= 1;
1491da177e4SLinus Torvalds 	tp->snd_cwnd = max(cwnd, restart_cwnd);
1501da177e4SLinus Torvalds 	tp->snd_cwnd_stamp = tcp_time_stamp;
1511da177e4SLinus Torvalds 	tp->snd_cwnd_used = 0;
1521da177e4SLinus Torvalds }
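
/* Worked example (illustrative; values are assumed): with snd_cwnd == 32,
 * restart_cwnd == 4 and an idle time slightly above 3 * icsk_rto, the loop
 * above halves cwnd once per elapsed RTO: 32 -> 16 -> 8 -> 4.  The final
 * max() keeps the result from dropping below the restart window.
 */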
1531da177e4SLinus Torvalds 
15440efc6faSStephen Hemminger static void tcp_event_data_sent(struct tcp_sock *tp,
1551da177e4SLinus Torvalds 				struct sk_buff *skb, struct sock *sk)
1561da177e4SLinus Torvalds {
157463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
158463c84b9SArnaldo Carvalho de Melo 	const u32 now = tcp_time_stamp;
1591da177e4SLinus Torvalds 
16035089bb2SDavid S. Miller 	if (sysctl_tcp_slow_start_after_idle &&
16135089bb2SDavid S. Miller 	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
162463c84b9SArnaldo Carvalho de Melo 		tcp_cwnd_restart(sk, __sk_dst_get(sk));
1631da177e4SLinus Torvalds 
1641da177e4SLinus Torvalds 	tp->lsndtime = now;
1651da177e4SLinus Torvalds 
1661da177e4SLinus Torvalds 	/* If it is a reply within the ato after the last received
1671da177e4SLinus Torvalds 	 * packet, enter pingpong mode.
1681da177e4SLinus Torvalds 	 */
169463c84b9SArnaldo Carvalho de Melo 	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
170463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.pingpong = 1;
1711da177e4SLinus Torvalds }
1721da177e4SLinus Torvalds 
17340efc6faSStephen Hemminger static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
1741da177e4SLinus Torvalds {
175463c84b9SArnaldo Carvalho de Melo 	tcp_dec_quickack_mode(sk, pkts);
176463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer.
1801da177e4SLinus Torvalds  * Based on the assumption that the given amount of space
1811da177e4SLinus Torvalds  * will be offered. Store the results in the tp structure.
1821da177e4SLinus Torvalds  * NOTE: for smooth operation initial space offering should
1831da177e4SLinus Torvalds  * be a multiple of mss if possible. We assume here that mss >= 1.
1841da177e4SLinus Torvalds  * This MUST be enforced by all callers.
1851da177e4SLinus Torvalds  */
1861da177e4SLinus Torvalds void tcp_select_initial_window(int __space, __u32 mss,
1871da177e4SLinus Torvalds 			       __u32 *rcv_wnd, __u32 *window_clamp,
1881da177e4SLinus Torvalds 			       int wscale_ok, __u8 *rcv_wscale)
1891da177e4SLinus Torvalds {
1901da177e4SLinus Torvalds 	unsigned int space = (__space < 0 ? 0 : __space);
1911da177e4SLinus Torvalds 
1921da177e4SLinus Torvalds 	/* If no clamp set the clamp to the max possible scaled window */
1931da177e4SLinus Torvalds 	if (*window_clamp == 0)
1941da177e4SLinus Torvalds 		(*window_clamp) = (65535 << 14);
1951da177e4SLinus Torvalds 	space = min(*window_clamp, space);
1961da177e4SLinus Torvalds 
1971da177e4SLinus Torvalds 	/* Quantize space offering to a multiple of mss if possible. */
1981da177e4SLinus Torvalds 	if (space > mss)
1991da177e4SLinus Torvalds 		space = (space / mss) * mss;
2001da177e4SLinus Torvalds 
2011da177e4SLinus Torvalds 	/* NOTE: offering an initial window larger than 32767
20215d99e02SRick Jones 	 * will break some buggy TCP stacks. If the admin tells us
20315d99e02SRick Jones 	 * it is likely we could be speaking with such a buggy stack
20415d99e02SRick Jones 	 * we will truncate our initial window offering to 32K-1
20515d99e02SRick Jones 	 * unless the remote has sent us a window scaling option,
20615d99e02SRick Jones 	 * which we interpret as a sign the remote TCP is not
20715d99e02SRick Jones 	 * misinterpreting the window field as a signed quantity.
2081da177e4SLinus Torvalds 	 */
20915d99e02SRick Jones 	if (sysctl_tcp_workaround_signed_windows)
2101da177e4SLinus Torvalds 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
21115d99e02SRick Jones 	else
21215d99e02SRick Jones 		(*rcv_wnd) = space;
21315d99e02SRick Jones 
2141da177e4SLinus Torvalds 	(*rcv_wscale) = 0;
2151da177e4SLinus Torvalds 	if (wscale_ok) {
2161da177e4SLinus Torvalds 		/* Set window scaling on max possible window
2171da177e4SLinus Torvalds 		 * See RFC1323 for an explanation of the limit to 14
2181da177e4SLinus Torvalds 		 */
2191da177e4SLinus Torvalds 		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
220316c1592SStephen Hemminger 		space = min_t(u32, space, *window_clamp);
2211da177e4SLinus Torvalds 		while (space > 65535 && (*rcv_wscale) < 14) {
2221da177e4SLinus Torvalds 			space >>= 1;
2231da177e4SLinus Torvalds 			(*rcv_wscale)++;
2241da177e4SLinus Torvalds 		}
2251da177e4SLinus Torvalds 	}
2261da177e4SLinus Torvalds 
2271da177e4SLinus Torvalds 	/* Set the initial window to a value large enough for senders
2286b251858SDavid S. Miller 	 * following RFC2414.  Senders not following this RFC
2291da177e4SLinus Torvalds 	 * will be satisfied with 2.
2301da177e4SLinus Torvalds 	 */
2311da177e4SLinus Torvalds 	if (mss > (1<<*rcv_wscale)) {
23201ff367eSDavid S. Miller 		int init_cwnd = 4;
23301ff367eSDavid S. Miller 		if (mss > 1460*3)
2341da177e4SLinus Torvalds 			init_cwnd = 2;
23501ff367eSDavid S. Miller 		else if (mss > 1460)
23601ff367eSDavid S. Miller 			init_cwnd = 3;
2371da177e4SLinus Torvalds 		if (*rcv_wnd > init_cwnd*mss)
2381da177e4SLinus Torvalds 			*rcv_wnd = init_cwnd*mss;
2391da177e4SLinus Torvalds 	}
2401da177e4SLinus Torvalds 
2411da177e4SLinus Torvalds 	/* Set the clamp no higher than max representable value */
2421da177e4SLinus Torvalds 	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
2431da177e4SLinus Torvalds }
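
/* Worked example (illustrative; the 4 MB figure is an assumed sysctl value):
 * if max(sysctl_tcp_rmem[2], sysctl_rmem_max) is 4 MB and *window_clamp
 * does not cut it down, the loop above needs seven shifts before the space
 * fits in 16 bits (4194304 >> 7 == 32768), so *rcv_wscale becomes 7.  With
 * mss == 1460 the initial *rcv_wnd is further capped at init_cwnd * mss ==
 * 4 * 1460 == 5840 bytes.
 */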
2441da177e4SLinus Torvalds 
2451da177e4SLinus Torvalds /* Choose a new window to advertise, update state in tcp_sock for the
2461da177e4SLinus Torvalds  * socket, and return result with RFC1323 scaling applied.  The return
2471da177e4SLinus Torvalds  * value can be stuffed directly into th->window for an outgoing
2481da177e4SLinus Torvalds  * frame.
2491da177e4SLinus Torvalds  */
25040efc6faSStephen Hemminger static u16 tcp_select_window(struct sock *sk)
2511da177e4SLinus Torvalds {
2521da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2531da177e4SLinus Torvalds 	u32 cur_win = tcp_receive_window(tp);
2541da177e4SLinus Torvalds 	u32 new_win = __tcp_select_window(sk);
2551da177e4SLinus Torvalds 
2561da177e4SLinus Torvalds 	/* Never shrink the offered window */
2571da177e4SLinus Torvalds 	if (new_win < cur_win) {
2581da177e4SLinus Torvalds 		/* Danger Will Robinson!
2591da177e4SLinus Torvalds 		 * Don't update rcv_wup/rcv_wnd here or else
2601da177e4SLinus Torvalds 		 * we will not be able to advertise a zero
2611da177e4SLinus Torvalds 		 * window in time.  --DaveM
2621da177e4SLinus Torvalds 		 *
2631da177e4SLinus Torvalds 		 * Relax Will Robinson.
2641da177e4SLinus Torvalds 		 */
2651da177e4SLinus Torvalds 		new_win = cur_win;
2661da177e4SLinus Torvalds 	}
2671da177e4SLinus Torvalds 	tp->rcv_wnd = new_win;
2681da177e4SLinus Torvalds 	tp->rcv_wup = tp->rcv_nxt;
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds 	/* Make sure we do not exceed the maximum possible
2711da177e4SLinus Torvalds 	 * scaled window.
2721da177e4SLinus Torvalds 	 */
27315d99e02SRick Jones 	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
2741da177e4SLinus Torvalds 		new_win = min(new_win, MAX_TCP_WINDOW);
2751da177e4SLinus Torvalds 	else
2761da177e4SLinus Torvalds 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
2771da177e4SLinus Torvalds 
2781da177e4SLinus Torvalds 	/* RFC1323 scaling applied */
2791da177e4SLinus Torvalds 	new_win >>= tp->rx_opt.rcv_wscale;
2801da177e4SLinus Torvalds 
2811da177e4SLinus Torvalds 	/* If we advertise zero window, disable fast path. */
2821da177e4SLinus Torvalds 	if (new_win == 0)
2831da177e4SLinus Torvalds 		tp->pred_flags = 0;
2841da177e4SLinus Torvalds 
2851da177e4SLinus Torvalds 	return new_win;
2861da177e4SLinus Torvalds }
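
/* Worked example (illustrative; values are assumed): if __tcp_select_window()
 * returned 262144 and rcv_wscale == 2, the value is first clamped to
 * 65535U << 2 == 262140 and then shifted down, so th->window carries
 * 262140 >> 2 == 65535.
 */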
2871da177e4SLinus Torvalds 
288bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
289bdf1ee5dSIlpo Järvinen 				       struct sk_buff *skb)
290bdf1ee5dSIlpo Järvinen {
291bdf1ee5dSIlpo Järvinen 	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
292bdf1ee5dSIlpo Järvinen 	if (!(tp->ecn_flags&TCP_ECN_OK))
293bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
294bdf1ee5dSIlpo Järvinen }
295bdf1ee5dSIlpo Järvinen 
296bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
297bdf1ee5dSIlpo Järvinen {
298bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
299bdf1ee5dSIlpo Järvinen 
300bdf1ee5dSIlpo Järvinen 	tp->ecn_flags = 0;
301bdf1ee5dSIlpo Järvinen 	if (sysctl_tcp_ecn) {
302bdf1ee5dSIlpo Järvinen 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
303bdf1ee5dSIlpo Järvinen 		tp->ecn_flags = TCP_ECN_OK;
304bdf1ee5dSIlpo Järvinen 	}
305bdf1ee5dSIlpo Järvinen }
306bdf1ee5dSIlpo Järvinen 
307bdf1ee5dSIlpo Järvinen static __inline__ void
308bdf1ee5dSIlpo Järvinen TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
309bdf1ee5dSIlpo Järvinen {
310bdf1ee5dSIlpo Järvinen 	if (inet_rsk(req)->ecn_ok)
311bdf1ee5dSIlpo Järvinen 		th->ece = 1;
312bdf1ee5dSIlpo Järvinen }
313bdf1ee5dSIlpo Järvinen 
314bdf1ee5dSIlpo Järvinen static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
315bdf1ee5dSIlpo Järvinen 				int tcp_header_len)
316bdf1ee5dSIlpo Järvinen {
317bdf1ee5dSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
318bdf1ee5dSIlpo Järvinen 
319bdf1ee5dSIlpo Järvinen 	if (tp->ecn_flags & TCP_ECN_OK) {
320bdf1ee5dSIlpo Järvinen 		/* Not-retransmitted data segment: set ECT and inject CWR. */
321bdf1ee5dSIlpo Järvinen 		if (skb->len != tcp_header_len &&
322bdf1ee5dSIlpo Järvinen 		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
323bdf1ee5dSIlpo Järvinen 			INET_ECN_xmit(sk);
324bdf1ee5dSIlpo Järvinen 			if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
325bdf1ee5dSIlpo Järvinen 				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
326bdf1ee5dSIlpo Järvinen 				tcp_hdr(skb)->cwr = 1;
327bdf1ee5dSIlpo Järvinen 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
328bdf1ee5dSIlpo Järvinen 			}
329bdf1ee5dSIlpo Järvinen 		} else {
330bdf1ee5dSIlpo Järvinen 			/* ACK or retransmitted segment: clear ECT|CE */
331bdf1ee5dSIlpo Järvinen 			INET_ECN_dontxmit(sk);
332bdf1ee5dSIlpo Järvinen 		}
333bdf1ee5dSIlpo Järvinen 		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
334bdf1ee5dSIlpo Järvinen 			tcp_hdr(skb)->ece = 1;
335bdf1ee5dSIlpo Järvinen 	}
336bdf1ee5dSIlpo Järvinen }
337bdf1ee5dSIlpo Järvinen 
338df7a3b07SAl Viro static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
339cfb6eeb4SYOSHIFUJI Hideaki 					 __u32 tstamp, __u8 **md5_hash)
34040efc6faSStephen Hemminger {
34140efc6faSStephen Hemminger 	if (tp->rx_opt.tstamp_ok) {
342496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
34340efc6faSStephen Hemminger 			       (TCPOPT_NOP << 16) |
34440efc6faSStephen Hemminger 			       (TCPOPT_TIMESTAMP << 8) |
34540efc6faSStephen Hemminger 			       TCPOLEN_TIMESTAMP);
34640efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);
34740efc6faSStephen Hemminger 		*ptr++ = htonl(tp->rx_opt.ts_recent);
34840efc6faSStephen Hemminger 	}
34940efc6faSStephen Hemminger 	if (tp->rx_opt.eff_sacks) {
35040efc6faSStephen Hemminger 		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
35140efc6faSStephen Hemminger 		int this_sack;
35240efc6faSStephen Hemminger 
35340efc6faSStephen Hemminger 		*ptr++ = htonl((TCPOPT_NOP  << 24) |
35440efc6faSStephen Hemminger 			       (TCPOPT_NOP  << 16) |
35540efc6faSStephen Hemminger 			       (TCPOPT_SACK <<  8) |
35640efc6faSStephen Hemminger 			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
35740efc6faSStephen Hemminger 						     TCPOLEN_SACK_PERBLOCK)));
3582de979bdSStephen Hemminger 
35940efc6faSStephen Hemminger 		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
36040efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].start_seq);
36140efc6faSStephen Hemminger 			*ptr++ = htonl(sp[this_sack].end_seq);
36240efc6faSStephen Hemminger 		}
3632de979bdSStephen Hemminger 
36440efc6faSStephen Hemminger 		if (tp->rx_opt.dsack) {
36540efc6faSStephen Hemminger 			tp->rx_opt.dsack = 0;
36640efc6faSStephen Hemminger 			tp->rx_opt.eff_sacks--;
36740efc6faSStephen Hemminger 		}
36840efc6faSStephen Hemminger 	}
369cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
370cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
371cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
372cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
373cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
374cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
375cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *)ptr;
376cfb6eeb4SYOSHIFUJI Hideaki 	}
377cfb6eeb4SYOSHIFUJI Hideaki #endif
37840efc6faSStephen Hemminger }
37940efc6faSStephen Hemminger 
38040efc6faSStephen Hemminger /* Construct a tcp options header for a SYN or SYN_ACK packet.
38140efc6faSStephen Hemminger  * If this is ever changed make sure to change the definition of
38240efc6faSStephen Hemminger  * MAX_SYN_SIZE to match the new maximum number of options that you
38340efc6faSStephen Hemminger  * can generate.
384cfb6eeb4SYOSHIFUJI Hideaki  *
385cfb6eeb4SYOSHIFUJI Hideaki  * Note that with the RFC2385 TCP option, we make room for the
386cfb6eeb4SYOSHIFUJI Hideaki  * 16 byte MD5 hash. This will be filled in later, so the pointer for the
387cfb6eeb4SYOSHIFUJI Hideaki  * location to be filled is passed back up.
38840efc6faSStephen Hemminger  */
389df7a3b07SAl Viro static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
39040efc6faSStephen Hemminger 				  int offer_wscale, int wscale, __u32 tstamp,
391cfb6eeb4SYOSHIFUJI Hideaki 				  __u32 ts_recent, __u8 **md5_hash)
39240efc6faSStephen Hemminger {
39340efc6faSStephen Hemminger 	/* We always get an MSS option.
39440efc6faSStephen Hemminger 	 * The option bytes which will be seen in normal data
39540efc6faSStephen Hemminger 	 * packets, should timestamps be used, must be in the MSS
39640efc6faSStephen Hemminger 	 * advertised.  But we subtract them from tp->mss_cache so
39740efc6faSStephen Hemminger 	 * that calculations in tcp_sendmsg are simpler etc.
39840efc6faSStephen Hemminger 	 * So account for this fact here if necessary.  If we
39940efc6faSStephen Hemminger 	 * don't do this correctly, as a receiver we won't
40040efc6faSStephen Hemminger 	 * recognize data packets as being full sized when we
40140efc6faSStephen Hemminger 	 * should, and thus we won't abide by the delayed ACK
40240efc6faSStephen Hemminger 	 * rules correctly.
40340efc6faSStephen Hemminger 	 * SACKs don't matter, we never delay an ACK when we
40440efc6faSStephen Hemminger 	 * have any of those going out.
40540efc6faSStephen Hemminger 	 */
40640efc6faSStephen Hemminger 	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
40740efc6faSStephen Hemminger 	if (ts) {
40840efc6faSStephen Hemminger 		if (sack)
409496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
410496c98dfSYOSHIFUJI Hideaki 				       (TCPOLEN_SACK_PERM << 16) |
411496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
412496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
41340efc6faSStephen Hemminger 		else
414496c98dfSYOSHIFUJI Hideaki 			*ptr++ = htonl((TCPOPT_NOP << 24) |
415496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_NOP << 16) |
416496c98dfSYOSHIFUJI Hideaki 				       (TCPOPT_TIMESTAMP << 8) |
417496c98dfSYOSHIFUJI Hideaki 				       TCPOLEN_TIMESTAMP);
41840efc6faSStephen Hemminger 		*ptr++ = htonl(tstamp);		/* TSVAL */
41940efc6faSStephen Hemminger 		*ptr++ = htonl(ts_recent);	/* TSECR */
42040efc6faSStephen Hemminger 	} else if (sack)
421496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
422496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
423496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_SACK_PERM << 8) |
424496c98dfSYOSHIFUJI Hideaki 			       TCPOLEN_SACK_PERM);
42540efc6faSStephen Hemminger 	if (offer_wscale)
426496c98dfSYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
427496c98dfSYOSHIFUJI Hideaki 			       (TCPOPT_WINDOW << 16) |
428496c98dfSYOSHIFUJI Hideaki 			       (TCPOLEN_WINDOW << 8) |
429496c98dfSYOSHIFUJI Hideaki 			       (wscale));
430cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
431cfb6eeb4SYOSHIFUJI Hideaki 	/*
432cfb6eeb4SYOSHIFUJI Hideaki 	 * If MD5 is enabled, then we set the option, and include the size
433cfb6eeb4SYOSHIFUJI Hideaki 	 * (always 18). The actual MD5 hash is added just before the
434cfb6eeb4SYOSHIFUJI Hideaki 	 * packet is sent.
435cfb6eeb4SYOSHIFUJI Hideaki 	 */
436cfb6eeb4SYOSHIFUJI Hideaki 	if (md5_hash) {
437cfb6eeb4SYOSHIFUJI Hideaki 		*ptr++ = htonl((TCPOPT_NOP << 24) |
438cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_NOP << 16) |
439cfb6eeb4SYOSHIFUJI Hideaki 			       (TCPOPT_MD5SIG << 8) |
440cfb6eeb4SYOSHIFUJI Hideaki 			       TCPOLEN_MD5SIG);
441cfb6eeb4SYOSHIFUJI Hideaki 		*md5_hash = (__u8 *) ptr;
442cfb6eeb4SYOSHIFUJI Hideaki 	}
443cfb6eeb4SYOSHIFUJI Hideaki #endif
44440efc6faSStephen Hemminger }
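
/* Layout example (illustrative): for an active SYN with timestamps, SACK and
 * window scaling all enabled (and no MD5), the words written above are
 *
 *	MSS                 kind 2, len 4                    ->  4 bytes
 *	SACK_PERM+TIMESTAMP kind 4/len 2 packed with
 *	                    kind 8/len 10, then TSVAL, TSECR -> 12 bytes
 *	NOP+WINDOW          kind 3, len 3, shift count       ->  4 bytes
 *
 * i.e. 20 bytes of options, matching the tcp_header_size computed for the
 * SYN case in tcp_transmit_skb() below.
 */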
4451da177e4SLinus Torvalds 
4461da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued up by
4471da177e4SLinus Torvalds  * tcp_do_sendmsg().  This is used by both the initial
4481da177e4SLinus Torvalds  * transmission and possible later retransmissions.
4491da177e4SLinus Torvalds  * All SKBs seen here are completely headerless.  It is our
4501da177e4SLinus Torvalds  * job to build the TCP header, and pass the packet down to
4511da177e4SLinus Torvalds  * IP so it can do the same plus pass the packet off to the
4521da177e4SLinus Torvalds  * device.
4531da177e4SLinus Torvalds  *
4541da177e4SLinus Torvalds  * We are working here with either a clone of the original
4551da177e4SLinus Torvalds  * SKB, or a fresh unique copy made by the retransmit engine.
4561da177e4SLinus Torvalds  */
457dfb4b9dcSDavid S. Miller static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
4581da177e4SLinus Torvalds {
4596687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
460dfb4b9dcSDavid S. Miller 	struct inet_sock *inet;
461dfb4b9dcSDavid S. Miller 	struct tcp_sock *tp;
462dfb4b9dcSDavid S. Miller 	struct tcp_skb_cb *tcb;
463dfb4b9dcSDavid S. Miller 	int tcp_header_size;
464cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
465cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
466cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
467cfb6eeb4SYOSHIFUJI Hideaki #endif
4681da177e4SLinus Torvalds 	struct tcphdr *th;
4691da177e4SLinus Torvalds 	int sysctl_flags;
4701da177e4SLinus Torvalds 	int err;
4711da177e4SLinus Torvalds 
472dfb4b9dcSDavid S. Miller 	BUG_ON(!skb || !tcp_skb_pcount(skb));
473dfb4b9dcSDavid S. Miller 
474dfb4b9dcSDavid S. Miller 	/* If congestion control is doing timestamping, we must
475dfb4b9dcSDavid S. Miller 	 * take such a timestamp before we potentially clone/copy.
476dfb4b9dcSDavid S. Miller 	 */
477164891aaSStephen Hemminger 	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
478dfb4b9dcSDavid S. Miller 		__net_timestamp(skb);
479dfb4b9dcSDavid S. Miller 
480dfb4b9dcSDavid S. Miller 	if (likely(clone_it)) {
481dfb4b9dcSDavid S. Miller 		if (unlikely(skb_cloned(skb)))
482dfb4b9dcSDavid S. Miller 			skb = pskb_copy(skb, gfp_mask);
483dfb4b9dcSDavid S. Miller 		else
484dfb4b9dcSDavid S. Miller 			skb = skb_clone(skb, gfp_mask);
485dfb4b9dcSDavid S. Miller 		if (unlikely(!skb))
486dfb4b9dcSDavid S. Miller 			return -ENOBUFS;
487dfb4b9dcSDavid S. Miller 	}
488dfb4b9dcSDavid S. Miller 
489dfb4b9dcSDavid S. Miller 	inet = inet_sk(sk);
490dfb4b9dcSDavid S. Miller 	tp = tcp_sk(sk);
491dfb4b9dcSDavid S. Miller 	tcb = TCP_SKB_CB(skb);
492dfb4b9dcSDavid S. Miller 	tcp_header_size = tp->tcp_header_len;
4931da177e4SLinus Torvalds 
4941da177e4SLinus Torvalds #define SYSCTL_FLAG_TSTAMPS	0x1
4951da177e4SLinus Torvalds #define SYSCTL_FLAG_WSCALE	0x2
4961da177e4SLinus Torvalds #define SYSCTL_FLAG_SACK	0x4
4971da177e4SLinus Torvalds 
4981da177e4SLinus Torvalds 	sysctl_flags = 0;
499dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
5001da177e4SLinus Torvalds 		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
5011da177e4SLinus Torvalds 		if (sysctl_tcp_timestamps) {
5021da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
5031da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
5041da177e4SLinus Torvalds 		}
5051da177e4SLinus Torvalds 		if (sysctl_tcp_window_scaling) {
5061da177e4SLinus Torvalds 			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
5071da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_WSCALE;
5081da177e4SLinus Torvalds 		}
5091da177e4SLinus Torvalds 		if (sysctl_tcp_sack) {
5101da177e4SLinus Torvalds 			sysctl_flags |= SYSCTL_FLAG_SACK;
5111da177e4SLinus Torvalds 			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
5121da177e4SLinus Torvalds 				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
5131da177e4SLinus Torvalds 		}
514dfb4b9dcSDavid S. Miller 	} else if (unlikely(tp->rx_opt.eff_sacks)) {
5151da177e4SLinus Torvalds 		/* A SACK is 2 pad bytes, a 2 byte header, plus
5161da177e4SLinus Torvalds 		 * 2 32-bit sequence numbers for each SACK block.
5171da177e4SLinus Torvalds 		 */
5181da177e4SLinus Torvalds 		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
519dfb4b9dcSDavid S. Miller 				    (tp->rx_opt.eff_sacks *
520dfb4b9dcSDavid S. Miller 				     TCPOLEN_SACK_PERBLOCK));
5211da177e4SLinus Torvalds 	}
5221da177e4SLinus Torvalds 
523317a76f9SStephen Hemminger 	if (tcp_packets_in_flight(tp) == 0)
5246687e988SArnaldo Carvalho de Melo 		tcp_ca_event(sk, CA_EVENT_TX_START);
5251da177e4SLinus Torvalds 
526cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
527cfb6eeb4SYOSHIFUJI Hideaki 	/*
528cfb6eeb4SYOSHIFUJI Hideaki 	 * Are we doing MD5 on this segment? If so - make
529cfb6eeb4SYOSHIFUJI Hideaki 	 * room for it.
530cfb6eeb4SYOSHIFUJI Hideaki 	 */
531cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tp->af_specific->md5_lookup(sk, sk);
532cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
533cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
534cfb6eeb4SYOSHIFUJI Hideaki #endif
535cfb6eeb4SYOSHIFUJI Hideaki 
536aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
537aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
538e89862f4SDavid S. Miller 	skb_set_owner_w(skb, sk);
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds 	/* Build TCP header and checksum it. */
541aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
5421da177e4SLinus Torvalds 	th->source		= inet->sport;
5431da177e4SLinus Torvalds 	th->dest		= inet->dport;
5441da177e4SLinus Torvalds 	th->seq			= htonl(tcb->seq);
5451da177e4SLinus Torvalds 	th->ack_seq		= htonl(tp->rcv_nxt);
546df7a3b07SAl Viro 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
547dfb4b9dcSDavid S. Miller 					tcb->flags);
548dfb4b9dcSDavid S. Miller 
549dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
5501da177e4SLinus Torvalds 		/* RFC1323: The window in SYN & SYN/ACK segments
5511da177e4SLinus Torvalds 		 * is never scaled.
5521da177e4SLinus Torvalds 		 */
553600ff0c2SIlpo Järvinen 		th->window	= htons(min(tp->rcv_wnd, 65535U));
5541da177e4SLinus Torvalds 	} else {
5551da177e4SLinus Torvalds 		th->window	= htons(tcp_select_window(sk));
5561da177e4SLinus Torvalds 	}
5571da177e4SLinus Torvalds 	th->check		= 0;
5581da177e4SLinus Torvalds 	th->urg_ptr		= 0;
5591da177e4SLinus Torvalds 
560dfb4b9dcSDavid S. Miller 	if (unlikely(tp->urg_mode &&
561dfb4b9dcSDavid S. Miller 		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
5621da177e4SLinus Torvalds 		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
5631da177e4SLinus Torvalds 		th->urg			= 1;
5641da177e4SLinus Torvalds 	}
5651da177e4SLinus Torvalds 
566dfb4b9dcSDavid S. Miller 	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
567df7a3b07SAl Viro 		tcp_syn_build_options((__be32 *)(th + 1),
5681da177e4SLinus Torvalds 				      tcp_advertise_mss(sk),
5691da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
5701da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_SACK),
5711da177e4SLinus Torvalds 				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
5721da177e4SLinus Torvalds 				      tp->rx_opt.rcv_wscale,
5731da177e4SLinus Torvalds 				      tcb->when,
574cfb6eeb4SYOSHIFUJI Hideaki 				      tp->rx_opt.ts_recent,
575cfb6eeb4SYOSHIFUJI Hideaki 
576cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
577cfb6eeb4SYOSHIFUJI Hideaki 				      md5 ? &md5_hash_location :
578cfb6eeb4SYOSHIFUJI Hideaki #endif
579cfb6eeb4SYOSHIFUJI Hideaki 				      NULL);
5801da177e4SLinus Torvalds 	} else {
581df7a3b07SAl Viro 		tcp_build_and_update_options((__be32 *)(th + 1),
582cfb6eeb4SYOSHIFUJI Hideaki 					     tp, tcb->when,
583cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
584cfb6eeb4SYOSHIFUJI Hideaki 					     md5 ? &md5_hash_location :
585cfb6eeb4SYOSHIFUJI Hideaki #endif
586cfb6eeb4SYOSHIFUJI Hideaki 					     NULL);
5879e412ba7SIlpo Järvinen 		TCP_ECN_send(sk, skb, tcp_header_size);
5881da177e4SLinus Torvalds 	}
589dfb4b9dcSDavid S. Miller 
590cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
591cfb6eeb4SYOSHIFUJI Hideaki 	/* Calculate the MD5 hash, as we have all we need now */
592cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
593cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
594cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
595cfb6eeb4SYOSHIFUJI Hideaki 					       sk, NULL, NULL,
596aa8223c7SArnaldo Carvalho de Melo 					       tcp_hdr(skb),
597cfb6eeb4SYOSHIFUJI Hideaki 					       sk->sk_protocol,
598cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
599cfb6eeb4SYOSHIFUJI Hideaki 	}
600cfb6eeb4SYOSHIFUJI Hideaki #endif
601cfb6eeb4SYOSHIFUJI Hideaki 
6028292a17aSArnaldo Carvalho de Melo 	icsk->icsk_af_ops->send_check(sk, skb->len, skb);
6031da177e4SLinus Torvalds 
604dfb4b9dcSDavid S. Miller 	if (likely(tcb->flags & TCPCB_FLAG_ACK))
605fc6415bcSDavid S. Miller 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
6061da177e4SLinus Torvalds 
6071da177e4SLinus Torvalds 	if (skb->len != tcp_header_size)
6081da177e4SLinus Torvalds 		tcp_event_data_sent(tp, skb, sk);
6091da177e4SLinus Torvalds 
610bd37a088SWei Yongjun 	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
6111da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_OUTSEGS);
6121da177e4SLinus Torvalds 
613e89862f4SDavid S. Miller 	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
61483de47cdSHua Zhong 	if (likely(err <= 0))
6151da177e4SLinus Torvalds 		return err;
6161da177e4SLinus Torvalds 
6173cfe3baaSIlpo Järvinen 	tcp_enter_cwr(sk, 1);
6181da177e4SLinus Torvalds 
619b9df3cb8SGerrit Renker 	return net_xmit_eval(err);
620dfb4b9dcSDavid S. Miller 
6211da177e4SLinus Torvalds #undef SYSCTL_FLAG_TSTAMPS
6221da177e4SLinus Torvalds #undef SYSCTL_FLAG_WSCALE
6231da177e4SLinus Torvalds #undef SYSCTL_FLAG_SACK
6241da177e4SLinus Torvalds }
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds 
6271da177e4SLinus Torvalds /* This routine just queues the buffer.
6281da177e4SLinus Torvalds  *
6291da177e4SLinus Torvalds  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
6301da177e4SLinus Torvalds  * otherwise socket can stall.
6311da177e4SLinus Torvalds  */
6321da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
6331da177e4SLinus Torvalds {
6341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6351da177e4SLinus Torvalds 
6361da177e4SLinus Torvalds 	/* Advance write_seq and place onto the write_queue. */
6371da177e4SLinus Torvalds 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
6381da177e4SLinus Torvalds 	skb_header_release(skb);
639fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
6401da177e4SLinus Torvalds 	sk_charge_skb(sk, skb);
6411da177e4SLinus Torvalds }
6421da177e4SLinus Torvalds 
643846998aeSDavid S. Miller static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
644f6302d1dSDavid S. Miller {
645bcd76111SHerbert Xu 	if (skb->len <= mss_now || !sk_can_gso(sk)) {
646f6302d1dSDavid S. Miller 		/* Avoid the costly divide in the normal
647f6302d1dSDavid S. Miller 		 * non-TSO case.
648f6302d1dSDavid S. Miller 		 */
6497967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
6507967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
6517967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
652f6302d1dSDavid S. Miller 	} else {
653356f89e1SIlpo Järvinen 		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
6547967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = mss_now;
655bcd76111SHerbert Xu 		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
6561da177e4SLinus Torvalds 	}
6571da177e4SLinus Torvalds }
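
/* Worked example (illustrative; values are assumed): an skb carrying 4380
 * bytes of payload with mss_now == 1460 on a GSO-capable socket gets
 * gso_segs == DIV_ROUND_UP(4380, 1460) == 3 and gso_size == 1460, while a
 * 1000-byte skb takes the cheap branch and counts as one segment with
 * gso_size == 0.
 */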
6581da177e4SLinus Torvalds 
65991fed7a1SIlpo Järvinen /* When a modification to fackets_out becomes necessary, we need to check
66068f8353bSIlpo Järvinen  * whether the skb is counted in fackets_out or not.
66191fed7a1SIlpo Järvinen  */
662a47e5a98SIlpo Järvinen static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
66391fed7a1SIlpo Järvinen 				   int decr)
66491fed7a1SIlpo Järvinen {
665a47e5a98SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
666a47e5a98SIlpo Järvinen 
667dc86967bSIlpo Järvinen 	if (!tp->sacked_out || tcp_is_reno(tp))
66891fed7a1SIlpo Järvinen 		return;
66991fed7a1SIlpo Järvinen 
6706859d494SIlpo Järvinen 	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
67191fed7a1SIlpo Järvinen 		tp->fackets_out -= decr;
67291fed7a1SIlpo Järvinen }
67391fed7a1SIlpo Järvinen 
6741da177e4SLinus Torvalds /* Function to create two new TCP segments.  Shrinks the given segment
6751da177e4SLinus Torvalds  * to the specified size and appends a new segment with the rest of the
6761da177e4SLinus Torvalds  * packet to the list.  This won't be called frequently, I hope.
6771da177e4SLinus Torvalds  * Remember, these are still headerless SKBs at this point.
6781da177e4SLinus Torvalds  */
6796475be16SDavid S. Miller int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
6801da177e4SLinus Torvalds {
6811da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6821da177e4SLinus Torvalds 	struct sk_buff *buff;
6836475be16SDavid S. Miller 	int nsize, old_factor;
684b60b49eaSHerbert Xu 	int nlen;
6851da177e4SLinus Torvalds 	u16 flags;
6861da177e4SLinus Torvalds 
687b2cc99f0SHerbert Xu 	BUG_ON(len > skb->len);
6886a438bbeSStephen Hemminger 
689b7689205SIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
6901da177e4SLinus Torvalds 	nsize = skb_headlen(skb) - len;
6911da177e4SLinus Torvalds 	if (nsize < 0)
6921da177e4SLinus Torvalds 		nsize = 0;
6931da177e4SLinus Torvalds 
6941da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
6951da177e4SLinus Torvalds 	    skb_is_nonlinear(skb) &&
6961da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6971da177e4SLinus Torvalds 		return -ENOMEM;
6981da177e4SLinus Torvalds 
6991da177e4SLinus Torvalds 	/* Get a new skb... force flag on. */
7001da177e4SLinus Torvalds 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
7011da177e4SLinus Torvalds 	if (buff == NULL)
7021da177e4SLinus Torvalds 		return -ENOMEM; /* We'll just try again later. */
703ef5cb973SHerbert Xu 
704b60b49eaSHerbert Xu 	sk_charge_skb(sk, buff);
705b60b49eaSHerbert Xu 	nlen = skb->len - len - nsize;
706b60b49eaSHerbert Xu 	buff->truesize += nlen;
707b60b49eaSHerbert Xu 	skb->truesize -= nlen;
7081da177e4SLinus Torvalds 
7091da177e4SLinus Torvalds 	/* Correct the sequence numbers. */
7101da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
7111da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
7121da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
7131da177e4SLinus Torvalds 
7141da177e4SLinus Torvalds 	/* PSH and FIN should only be set in the second packet. */
7151da177e4SLinus Torvalds 	flags = TCP_SKB_CB(skb)->flags;
7161da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
7171da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = flags;
718e14c3cafSHerbert Xu 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
7191da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
7201da177e4SLinus Torvalds 
72184fa7933SPatrick McHardy 	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
7221da177e4SLinus Torvalds 		/* Copy and checksum data tail into the new buffer. */
7231da177e4SLinus Torvalds 		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
7241da177e4SLinus Torvalds 						       nsize, 0);
7251da177e4SLinus Torvalds 
7261da177e4SLinus Torvalds 		skb_trim(skb, len);
7271da177e4SLinus Torvalds 
7281da177e4SLinus Torvalds 		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
7291da177e4SLinus Torvalds 	} else {
73084fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
7311da177e4SLinus Torvalds 		skb_split(skb, buff, len);
7321da177e4SLinus Torvalds 	}
7331da177e4SLinus Torvalds 
7341da177e4SLinus Torvalds 	buff->ip_summed = skb->ip_summed;
7351da177e4SLinus Torvalds 
7361da177e4SLinus Torvalds 	/* Looks stupid, but our code really uses the 'when' of
7371da177e4SLinus Torvalds 	 * skbs which it has never sent before. --ANK
7381da177e4SLinus Torvalds 	 */
7391da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
740a61bbcf2SPatrick McHardy 	buff->tstamp = skb->tstamp;
7411da177e4SLinus Torvalds 
7426475be16SDavid S. Miller 	old_factor = tcp_skb_pcount(skb);
7436475be16SDavid S. Miller 
7441da177e4SLinus Torvalds 	/* Fix up tso_factor for both original and new SKB.  */
745846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
746846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
7471da177e4SLinus Torvalds 
7486475be16SDavid S. Miller 	/* If this packet has been sent out already, we must
7496475be16SDavid S. Miller 	 * adjust the various packet counters.
7506475be16SDavid S. Miller 	 */
751cf0b450cSHerbert Xu 	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
7526475be16SDavid S. Miller 		int diff = old_factor - tcp_skb_pcount(skb) -
7536475be16SDavid S. Miller 			tcp_skb_pcount(buff);
7541da177e4SLinus Torvalds 
7556475be16SDavid S. Miller 		tp->packets_out -= diff;
756e14c3cafSHerbert Xu 
757e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
758e14c3cafSHerbert Xu 			tp->sacked_out -= diff;
759e14c3cafSHerbert Xu 		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
760e14c3cafSHerbert Xu 			tp->retrans_out -= diff;
761e14c3cafSHerbert Xu 
762b5860bbaSIlpo Järvinen 		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
7636475be16SDavid S. Miller 			tp->lost_out -= diff;
76483ca28beSHerbert Xu 
76583ca28beSHerbert Xu 		/* Adjust Reno SACK estimate. */
76691fed7a1SIlpo Järvinen 		if (tcp_is_reno(tp) && diff > 0) {
767af610b4cSIlpo Järvinen 			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
768005903bcSIlpo Järvinen 			tcp_verify_left_out(tp);
76983ca28beSHerbert Xu 		}
770a47e5a98SIlpo Järvinen 		tcp_adjust_fackets_out(sk, skb, diff);
7711da177e4SLinus Torvalds 	}
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds 	/* Link BUFF into the send queue. */
774f44b5271SDavid S. Miller 	skb_header_release(buff);
775fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
7761da177e4SLinus Torvalds 
7771da177e4SLinus Torvalds 	return 0;
7781da177e4SLinus Torvalds }
7791da177e4SLinus Torvalds 
7801da177e4SLinus Torvalds /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
7811da177e4SLinus Torvalds  * eventually). The difference is that pulled data not copied, but
7821da177e4SLinus Torvalds  * eventually). The difference is that the pulled data is not copied, but
7831da177e4SLinus Torvalds  */
784f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len)
7851da177e4SLinus Torvalds {
7861da177e4SLinus Torvalds 	int i, k, eat;
7871da177e4SLinus Torvalds 
7881da177e4SLinus Torvalds 	eat = len;
7891da177e4SLinus Torvalds 	k = 0;
7901da177e4SLinus Torvalds 	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
7911da177e4SLinus Torvalds 		if (skb_shinfo(skb)->frags[i].size <= eat) {
7921da177e4SLinus Torvalds 			put_page(skb_shinfo(skb)->frags[i].page);
7931da177e4SLinus Torvalds 			eat -= skb_shinfo(skb)->frags[i].size;
7941da177e4SLinus Torvalds 		} else {
7951da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
7961da177e4SLinus Torvalds 			if (eat) {
7971da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].page_offset += eat;
7981da177e4SLinus Torvalds 				skb_shinfo(skb)->frags[k].size -= eat;
7991da177e4SLinus Torvalds 				eat = 0;
8001da177e4SLinus Torvalds 			}
8011da177e4SLinus Torvalds 			k++;
8021da177e4SLinus Torvalds 		}
8031da177e4SLinus Torvalds 	}
8041da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
8051da177e4SLinus Torvalds 
80627a884dcSArnaldo Carvalho de Melo 	skb_reset_tail_pointer(skb);
8071da177e4SLinus Torvalds 	skb->data_len -= len;
8081da177e4SLinus Torvalds 	skb->len = skb->data_len;
8091da177e4SLinus Torvalds }
8101da177e4SLinus Torvalds 
8111da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
8121da177e4SLinus Torvalds {
8131da177e4SLinus Torvalds 	if (skb_cloned(skb) &&
8141da177e4SLinus Torvalds 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
8151da177e4SLinus Torvalds 		return -ENOMEM;
8161da177e4SLinus Torvalds 
817f2911969SHerbert Xu 	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
818f2911969SHerbert Xu 	if (unlikely(len < skb_headlen(skb)))
8191da177e4SLinus Torvalds 		__skb_pull(skb, len);
820f2911969SHerbert Xu 	else
821f2911969SHerbert Xu 		__pskb_trim_head(skb, len - skb_headlen(skb));
8221da177e4SLinus Torvalds 
8231da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq += len;
82484fa7933SPatrick McHardy 	skb->ip_summed = CHECKSUM_PARTIAL;
8251da177e4SLinus Torvalds 
8261da177e4SLinus Torvalds 	skb->truesize	     -= len;
8271da177e4SLinus Torvalds 	sk->sk_wmem_queued   -= len;
8281da177e4SLinus Torvalds 	sk->sk_forward_alloc += len;
8291da177e4SLinus Torvalds 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
8301da177e4SLinus Torvalds 
8311da177e4SLinus Torvalds 	/* Any change of skb->len requires recalculation of tso
8321da177e4SLinus Torvalds 	 * factor and mss.
8331da177e4SLinus Torvalds 	 */
8341da177e4SLinus Torvalds 	if (tcp_skb_pcount(skb) > 1)
835846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
8361da177e4SLinus Torvalds 
8371da177e4SLinus Torvalds 	return 0;
8381da177e4SLinus Torvalds }
8391da177e4SLinus Torvalds 
8405d424d5aSJohn Heffner /* Not accounting for SACKs here. */
8415d424d5aSJohn Heffner int tcp_mtu_to_mss(struct sock *sk, int pmtu)
8425d424d5aSJohn Heffner {
8435d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8445d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8455d424d5aSJohn Heffner 	int mss_now;
8465d424d5aSJohn Heffner 
8475d424d5aSJohn Heffner 	/* Calculate base mss without TCP options:
8485d424d5aSJohn Heffner 	   It is MMS_S - sizeof(tcphdr) of rfc1122
8495d424d5aSJohn Heffner 	 */
8505d424d5aSJohn Heffner 	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
8515d424d5aSJohn Heffner 
8525d424d5aSJohn Heffner 	/* Clamp it (mss_clamp does not include tcp options) */
8535d424d5aSJohn Heffner 	if (mss_now > tp->rx_opt.mss_clamp)
8545d424d5aSJohn Heffner 		mss_now = tp->rx_opt.mss_clamp;
8555d424d5aSJohn Heffner 
8565d424d5aSJohn Heffner 	/* Now subtract optional transport overhead */
8575d424d5aSJohn Heffner 	mss_now -= icsk->icsk_ext_hdr_len;
8585d424d5aSJohn Heffner 
8595d424d5aSJohn Heffner 	/* Then reserve room for full set of TCP options and 8 bytes of data */
8605d424d5aSJohn Heffner 	if (mss_now < 48)
8615d424d5aSJohn Heffner 		mss_now = 48;
8625d424d5aSJohn Heffner 
8635d424d5aSJohn Heffner 	/* Now subtract TCP options size, not including SACKs */
8645d424d5aSJohn Heffner 	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
8655d424d5aSJohn Heffner 
8665d424d5aSJohn Heffner 	return mss_now;
8675d424d5aSJohn Heffner }
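
/* Worked example (illustrative; values are assumed): tcp_mtu_to_mss(sk, 1500)
 * on plain IPv4 with no IP options starts from 1500 - 20 - 20 == 1460; if
 * timestamps were negotiated, tp->tcp_header_len is 20 + 12, so the final
 * subtraction leaves 1460 - 12 == 1448 bytes of payload per segment.
 */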
8685d424d5aSJohn Heffner 
8695d424d5aSJohn Heffner /* Inverse of above */
8705d424d5aSJohn Heffner int tcp_mss_to_mtu(struct sock *sk, int mss)
8715d424d5aSJohn Heffner {
8725d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8735d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8745d424d5aSJohn Heffner 	int mtu;
8755d424d5aSJohn Heffner 
8765d424d5aSJohn Heffner 	mtu = mss +
8775d424d5aSJohn Heffner 	      tp->tcp_header_len +
8785d424d5aSJohn Heffner 	      icsk->icsk_ext_hdr_len +
8795d424d5aSJohn Heffner 	      icsk->icsk_af_ops->net_header_len;
8805d424d5aSJohn Heffner 
8815d424d5aSJohn Heffner 	return mtu;
8825d424d5aSJohn Heffner }
8835d424d5aSJohn Heffner 
8845d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
8855d424d5aSJohn Heffner {
8865d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
8875d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
8885d424d5aSJohn Heffner 
8895d424d5aSJohn Heffner 	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
8905d424d5aSJohn Heffner 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
8915d424d5aSJohn Heffner 			       icsk->icsk_af_ops->net_header_len;
8925d424d5aSJohn Heffner 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
8935d424d5aSJohn Heffner 	icsk->icsk_mtup.probe_size = 0;
8945d424d5aSJohn Heffner }
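
/* Worked example (illustrative; values are assumed): with the default
 * sysctl_tcp_base_mss of 512 and bare 20-byte TCP and IPv4 headers,
 * search_low becomes 512 + 20 + 20 == 552 bytes of MTU, while search_high
 * is the negotiated mss_clamp plus the same headers.  Probing itself stays
 * off unless sysctl_tcp_mtu_probing is set above 1.
 */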
8955d424d5aSJohn Heffner 
8961da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set.
8971da177e4SLinus Torvalds 
8981da177e4SLinus Torvalds    tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
8991da177e4SLinus Torvalds    account for TCP options, but covers only the bare TCP header.
9001da177e4SLinus Torvalds 
9011da177e4SLinus Torvalds    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
902caa20d9aSStephen Hemminger    It is the minimum of user_mss and the mss received with SYN.
9031da177e4SLinus Torvalds    It also does not include TCP options.
9041da177e4SLinus Torvalds 
905d83d8461SArnaldo Carvalho de Melo    inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
9061da177e4SLinus Torvalds 
9071da177e4SLinus Torvalds    tp->mss_cache is current effective sending mss, including
9081da177e4SLinus Torvalds    all tcp options except for SACKs. It is evaluated,
9091da177e4SLinus Torvalds    taking into account current pmtu, but never exceeds
9101da177e4SLinus Torvalds    tp->rx_opt.mss_clamp.
9111da177e4SLinus Torvalds 
9121da177e4SLinus Torvalds    NOTE1. rfc1122 clearly states that advertised MSS
9131da177e4SLinus Torvalds    DOES NOT include either tcp or ip options.
9141da177e4SLinus Torvalds 
915d83d8461SArnaldo Carvalho de Melo    NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
916d83d8461SArnaldo Carvalho de Melo    are READ ONLY outside this function.		--ANK (980731)
9171da177e4SLinus Torvalds  */
9181da177e4SLinus Torvalds 
9191da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
9201da177e4SLinus Torvalds {
9211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
922d83d8461SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
9235d424d5aSJohn Heffner 	int mss_now;
9241da177e4SLinus Torvalds 
9255d424d5aSJohn Heffner 	if (icsk->icsk_mtup.search_high > pmtu)
9265d424d5aSJohn Heffner 		icsk->icsk_mtup.search_high = pmtu;
9271da177e4SLinus Torvalds 
9285d424d5aSJohn Heffner 	mss_now = tcp_mtu_to_mss(sk, pmtu);
9291da177e4SLinus Torvalds 
9301da177e4SLinus Torvalds 	/* Bound mss with half of window */
9311da177e4SLinus Torvalds 	if (tp->max_window && mss_now > (tp->max_window>>1))
9321da177e4SLinus Torvalds 		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);
9331da177e4SLinus Torvalds 
9341da177e4SLinus Torvalds 	/* And store cached results */
935d83d8461SArnaldo Carvalho de Melo 	icsk->icsk_pmtu_cookie = pmtu;
9365d424d5aSJohn Heffner 	if (icsk->icsk_mtup.enabled)
9375d424d5aSJohn Heffner 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
938c1b4a7e6SDavid S. Miller 	tp->mss_cache = mss_now;
9391da177e4SLinus Torvalds 
9401da177e4SLinus Torvalds 	return mss_now;
9411da177e4SLinus Torvalds }
9421da177e4SLinus Torvalds 
9431da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options,
9441da177e4SLinus Torvalds  * and even PMTU discovery events into account.
9451da177e4SLinus Torvalds  *
9461da177e4SLinus Torvalds  * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
9471da177e4SLinus Torvalds  * cannot be large. However, taking into account rare use of URG, this
9481da177e4SLinus Torvalds  * is not a big flaw.
9491da177e4SLinus Torvalds  */
950c1b4a7e6SDavid S. Miller unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
9511da177e4SLinus Torvalds {
9521da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9531da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
954c1b4a7e6SDavid S. Miller 	u32 mss_now;
955c1b4a7e6SDavid S. Miller 	u16 xmit_size_goal;
956c1b4a7e6SDavid S. Miller 	int doing_tso = 0;
9571da177e4SLinus Torvalds 
958c1b4a7e6SDavid S. Miller 	mss_now = tp->mss_cache;
959c1b4a7e6SDavid S. Miller 
960bcd76111SHerbert Xu 	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
961c1b4a7e6SDavid S. Miller 		doing_tso = 1;
962c1b4a7e6SDavid S. Miller 
9631da177e4SLinus Torvalds 	if (dst) {
9641da177e4SLinus Torvalds 		u32 mtu = dst_mtu(dst);
965d83d8461SArnaldo Carvalho de Melo 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
9661da177e4SLinus Torvalds 			mss_now = tcp_sync_mss(sk, mtu);
9671da177e4SLinus Torvalds 	}
9681da177e4SLinus Torvalds 
9691da177e4SLinus Torvalds 	if (tp->rx_opt.eff_sacks)
9701da177e4SLinus Torvalds 		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
9711da177e4SLinus Torvalds 			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
972c1b4a7e6SDavid S. Miller 
973cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
974cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk))
975cfb6eeb4SYOSHIFUJI Hideaki 		mss_now -= TCPOLEN_MD5SIG_ALIGNED;
976cfb6eeb4SYOSHIFUJI Hideaki #endif
977cfb6eeb4SYOSHIFUJI Hideaki 
978c1b4a7e6SDavid S. Miller 	xmit_size_goal = mss_now;
979c1b4a7e6SDavid S. Miller 
980c1b4a7e6SDavid S. Miller 	if (doing_tso) {
9818292a17aSArnaldo Carvalho de Melo 		xmit_size_goal = (65535 -
9828292a17aSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_af_ops->net_header_len -
983d83d8461SArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_ext_hdr_len -
984d83d8461SArnaldo Carvalho de Melo 				  tp->tcp_header_len);
985c1b4a7e6SDavid S. Miller 
986c1b4a7e6SDavid S. Miller 		if (tp->max_window &&
987c1b4a7e6SDavid S. Miller 		    (xmit_size_goal > (tp->max_window >> 1)))
988c1b4a7e6SDavid S. Miller 			xmit_size_goal = max((tp->max_window >> 1),
989c1b4a7e6SDavid S. Miller 					     68U - tp->tcp_header_len);
990c1b4a7e6SDavid S. Miller 
991c1b4a7e6SDavid S. Miller 		xmit_size_goal -= (xmit_size_goal % mss_now);
992c1b4a7e6SDavid S. Miller 	}
993c1b4a7e6SDavid S. Miller 	tp->xmit_size_goal = xmit_size_goal;
994c1b4a7e6SDavid S. Miller 
9951da177e4SLinus Torvalds 	return mss_now;
9961da177e4SLinus Torvalds }
9971da177e4SLinus Torvalds 
998a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */
999a762a980SDavid S. Miller 
10009e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk)
1001a762a980SDavid S. Miller {
10029e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1003a762a980SDavid S. Miller 	__u32 packets_out = tp->packets_out;
1004a762a980SDavid S. Miller 
1005a762a980SDavid S. Miller 	if (packets_out >= tp->snd_cwnd) {
1006a762a980SDavid S. Miller 		/* Network is fed fully. */
1007a762a980SDavid S. Miller 		tp->snd_cwnd_used = 0;
1008a762a980SDavid S. Miller 		tp->snd_cwnd_stamp = tcp_time_stamp;
1009a762a980SDavid S. Miller 	} else {
1010a762a980SDavid S. Miller 		/* Network starves. */
1011a762a980SDavid S. Miller 		if (tp->packets_out > tp->snd_cwnd_used)
1012a762a980SDavid S. Miller 			tp->snd_cwnd_used = tp->packets_out;
1013a762a980SDavid S. Miller 
101415d33c07SDavid S. Miller 		if (sysctl_tcp_slow_start_after_idle &&
101515d33c07SDavid S. Miller 		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1016a762a980SDavid S. Miller 			tcp_cwnd_application_limited(sk);
1017a762a980SDavid S. Miller 	}
1018a762a980SDavid S. Miller }
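
/* The validation above follows RFC 2861: while the application keeps the
 * pipe full, snd_cwnd_stamp is refreshed; once the flow has been
 * application-limited for at least one RTO (and the slow-start-after-idle
 * sysctl allows it), tcp_cwnd_application_limited() is expected to decay
 * cwnd toward what was actually used, so an idle sender cannot later burst
 * a stale, oversized window into the network.
 */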
1019a762a980SDavid S. Miller 
1020c1b4a7e6SDavid S. Miller static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
1021c1b4a7e6SDavid S. Miller {
1022c1b4a7e6SDavid S. Miller 	u32 window, cwnd_len;
1023c1b4a7e6SDavid S. Miller 
1024c1b4a7e6SDavid S. Miller 	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
1025c1b4a7e6SDavid S. Miller 	cwnd_len = mss_now * cwnd;
1026c1b4a7e6SDavid S. Miller 	return min(window, cwnd_len);
1027c1b4a7e6SDavid S. Miller }
1028c1b4a7e6SDavid S. Miller 
1029c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the
1030c1b4a7e6SDavid S. Miller  * congestion window rules?  If so, return how many segments are allowed.
1031c1b4a7e6SDavid S. Miller  */
1032c1b4a7e6SDavid S. Miller static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
1033c1b4a7e6SDavid S. Miller {
1034c1b4a7e6SDavid S. Miller 	u32 in_flight, cwnd;
1035c1b4a7e6SDavid S. Miller 
1036c1b4a7e6SDavid S. Miller 	/* Don't be strict about the congestion window for the final FIN.  */
1037104439a8SJohn Heffner 	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1038104439a8SJohn Heffner 	    tcp_skb_pcount(skb) == 1)
1039c1b4a7e6SDavid S. Miller 		return 1;
1040c1b4a7e6SDavid S. Miller 
1041c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1042c1b4a7e6SDavid S. Miller 	cwnd = tp->snd_cwnd;
1043c1b4a7e6SDavid S. Miller 	if (in_flight < cwnd)
1044c1b4a7e6SDavid S. Miller 		return (cwnd - in_flight);
1045c1b4a7e6SDavid S. Miller 
1046c1b4a7e6SDavid S. Miller 	return 0;
1047c1b4a7e6SDavid S. Miller }
1048c1b4a7e6SDavid S. Miller 
1049c1b4a7e6SDavid S. Miller /* This must be invoked the first time we consider transmitting
1050c1b4a7e6SDavid S. Miller  * SKB onto the wire.
1051c1b4a7e6SDavid S. Miller  */
105240efc6faSStephen Hemminger static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
1053c1b4a7e6SDavid S. Miller {
1054c1b4a7e6SDavid S. Miller 	int tso_segs = tcp_skb_pcount(skb);
1055c1b4a7e6SDavid S. Miller 
1056846998aeSDavid S. Miller 	if (!tso_segs ||
1057846998aeSDavid S. Miller 	    (tso_segs > 1 &&
10587967168cSHerbert Xu 	     tcp_skb_mss(skb) != mss_now)) {
1059846998aeSDavid S. Miller 		tcp_set_skb_tso_segs(sk, skb, mss_now);
1060c1b4a7e6SDavid S. Miller 		tso_segs = tcp_skb_pcount(skb);
1061c1b4a7e6SDavid S. Miller 	}
1062c1b4a7e6SDavid S. Miller 	return tso_segs;
1063c1b4a7e6SDavid S. Miller }
1064c1b4a7e6SDavid S. Miller 
1065c1b4a7e6SDavid S. Miller static inline int tcp_minshall_check(const struct tcp_sock *tp)
1066c1b4a7e6SDavid S. Miller {
1067c1b4a7e6SDavid S. Miller 	return after(tp->snd_sml,tp->snd_una) &&
1068c1b4a7e6SDavid S. Miller 		!after(tp->snd_sml, tp->snd_nxt);
1069c1b4a7e6SDavid S. Miller }
1070c1b4a7e6SDavid S. Miller 
1071c1b4a7e6SDavid S. Miller /* Return 0 if the packet can be sent now without violating Nagle's rules:
1072c1b4a7e6SDavid S. Miller  * 1. It is full sized.
1073c1b4a7e6SDavid S. Miller  * 2. Or it contains FIN. (already checked by caller)
1074c1b4a7e6SDavid S. Miller  * 3. Or TCP_NODELAY was set.
1075c1b4a7e6SDavid S. Miller  * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1076c1b4a7e6SDavid S. Miller  *    With Minshall's modification: all sent small packets are ACKed.
1077c1b4a7e6SDavid S. Miller  */
1078c1b4a7e6SDavid S. Miller 
1079c1b4a7e6SDavid S. Miller static inline int tcp_nagle_check(const struct tcp_sock *tp,
1080c1b4a7e6SDavid S. Miller 				  const struct sk_buff *skb,
1081c1b4a7e6SDavid S. Miller 				  unsigned mss_now, int nonagle)
1082c1b4a7e6SDavid S. Miller {
1083c1b4a7e6SDavid S. Miller 	return (skb->len < mss_now &&
1084c1b4a7e6SDavid S. Miller 		((nonagle&TCP_NAGLE_CORK) ||
1085c1b4a7e6SDavid S. Miller 		 (!nonagle &&
1086c1b4a7e6SDavid S. Miller 		  tp->packets_out &&
1087c1b4a7e6SDavid S. Miller 		  tcp_minshall_check(tp))));
1088c1b4a7e6SDavid S. Miller }
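
/* Illustrative example of the Minshall variant above (assumed scenario):
 * the last sub-MSS segment we sent ended at snd_sml and is still
 * unacknowledged (snd_una < snd_sml <= snd_nxt).  A new sub-MSS segment
 * queued without TCP_NODELAY and without TCP_CORK is then held back by
 * tcp_nagle_check(), while a full mss_now-sized segment would still be
 * sent immediately.
 */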
1089c1b4a7e6SDavid S. Miller 
1090c1b4a7e6SDavid S. Miller /* Return non-zero if the Nagle test allows this packet to be
1091c1b4a7e6SDavid S. Miller  * sent now.
1092c1b4a7e6SDavid S. Miller  */
1093c1b4a7e6SDavid S. Miller static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1094c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1095c1b4a7e6SDavid S. Miller {
1096c1b4a7e6SDavid S. Miller 	/* Nagle's rule does not apply to frames which sit in the middle of the
1097c1b4a7e6SDavid S. Miller 	 * write_queue (they have no chance to get new data).
1098c1b4a7e6SDavid S. Miller 	 *
1099c1b4a7e6SDavid S. Miller 	 * This is implemented in the callers, where they modify the 'nonagle'
1100c1b4a7e6SDavid S. Miller 	 * argument based upon the location of SKB in the send queue.
1101c1b4a7e6SDavid S. Miller 	 */
1102c1b4a7e6SDavid S. Miller 	if (nonagle & TCP_NAGLE_PUSH)
1103c1b4a7e6SDavid S. Miller 		return 1;
1104c1b4a7e6SDavid S. Miller 
1105d551e454SIlpo Järvinen 	/* Don't use the Nagle rule for urgent data (or for the final FIN).
1106d551e454SIlpo Järvinen 	 * Nagle can be ignored during F-RTO too (see RFC4138).
1107d551e454SIlpo Järvinen 	 */
1108d551e454SIlpo Järvinen 	if (tp->urg_mode || (tp->frto_counter == 2) ||
1109c1b4a7e6SDavid S. Miller 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
1110c1b4a7e6SDavid S. Miller 		return 1;
1111c1b4a7e6SDavid S. Miller 
1112c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1113c1b4a7e6SDavid S. Miller 		return 1;
1114c1b4a7e6SDavid S. Miller 
1115c1b4a7e6SDavid S. Miller 	return 0;
1116c1b4a7e6SDavid S. Miller }
1117c1b4a7e6SDavid S. Miller 
1118c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */
1119c1b4a7e6SDavid S. Miller static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
1120c1b4a7e6SDavid S. Miller {
1121c1b4a7e6SDavid S. Miller 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1122c1b4a7e6SDavid S. Miller 
1123c1b4a7e6SDavid S. Miller 	if (skb->len > cur_mss)
1124c1b4a7e6SDavid S. Miller 		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1125c1b4a7e6SDavid S. Miller 
1126c1b4a7e6SDavid S. Miller 	return !after(end_seq, tp->snd_una + tp->snd_wnd);
1127c1b4a7e6SDavid S. Miller }
1128c1b4a7e6SDavid S. Miller 
1129fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1130c1b4a7e6SDavid S. Miller  * should be put on the wire right now.  If so, it returns the number of
1131c1b4a7e6SDavid S. Miller  * packets allowed by the congestion window.
1132c1b4a7e6SDavid S. Miller  */
1133c1b4a7e6SDavid S. Miller static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1134c1b4a7e6SDavid S. Miller 				 unsigned int cur_mss, int nonagle)
1135c1b4a7e6SDavid S. Miller {
1136c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1137c1b4a7e6SDavid S. Miller 	unsigned int cwnd_quota;
1138c1b4a7e6SDavid S. Miller 
1139846998aeSDavid S. Miller 	tcp_init_tso_segs(sk, skb, cur_mss);
1140c1b4a7e6SDavid S. Miller 
1141c1b4a7e6SDavid S. Miller 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1142c1b4a7e6SDavid S. Miller 		return 0;
1143c1b4a7e6SDavid S. Miller 
1144c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_cwnd_test(tp, skb);
1145c1b4a7e6SDavid S. Miller 	if (cwnd_quota &&
1146c1b4a7e6SDavid S. Miller 	    !tcp_snd_wnd_test(tp, skb, cur_mss))
1147c1b4a7e6SDavid S. Miller 		cwnd_quota = 0;
1148c1b4a7e6SDavid S. Miller 
1149c1b4a7e6SDavid S. Miller 	return cwnd_quota;
1150c1b4a7e6SDavid S. Miller }
1151c1b4a7e6SDavid S. Miller 
11529e412ba7SIlpo Järvinen int tcp_may_send_now(struct sock *sk)
1153c1b4a7e6SDavid S. Miller {
11549e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
1155fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1156c1b4a7e6SDavid S. Miller 
1157c1b4a7e6SDavid S. Miller 	return (skb &&
1158c1b4a7e6SDavid S. Miller 		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
1159c1b4a7e6SDavid S. Miller 			     (tcp_skb_is_last(sk, skb) ?
11604e67d876SIlpo Järvinen 			      tp->nonagle : TCP_NAGLE_PUSH)));
1161c1b4a7e6SDavid S. Miller }
1162c1b4a7e6SDavid S. Miller 
1163c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1164c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1165c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1166c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1167c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1168c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1169c1b4a7e6SDavid S. Miller  */
1170846998aeSDavid S. Miller static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1171c1b4a7e6SDavid S. Miller {
1172c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1173c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
1174c1b4a7e6SDavid S. Miller 	u16 flags;
1175c1b4a7e6SDavid S. Miller 
1176c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1177c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1178c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1179c1b4a7e6SDavid S. Miller 
1180df97c708SPavel Emelyanov 	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
1181c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1182c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1183c1b4a7e6SDavid S. Miller 
1184b60b49eaSHerbert Xu 	sk_charge_skb(sk, buff);
1185b60b49eaSHerbert Xu 	buff->truesize += nlen;
1186c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1187c1b4a7e6SDavid S. Miller 
1188c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1189c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1190c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1191c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1192c1b4a7e6SDavid S. Miller 
1193c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
1194c1b4a7e6SDavid S. Miller 	flags = TCP_SKB_CB(skb)->flags;
1195c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1196c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->flags = flags;
1197c1b4a7e6SDavid S. Miller 
1198c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1199c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1200c1b4a7e6SDavid S. Miller 
120184fa7933SPatrick McHardy 	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1202c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1203c1b4a7e6SDavid S. Miller 
1204c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1205846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1206846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1207c1b4a7e6SDavid S. Miller 
1208c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1209c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
1210fe067e8aSDavid S. Miller 	tcp_insert_write_queue_after(skb, buff, sk);
1211c1b4a7e6SDavid S. Miller 
1212c1b4a7e6SDavid S. Miller 	return 0;
1213c1b4a7e6SDavid S. Miller }
1214c1b4a7e6SDavid S. Miller 
1215c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1216c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1217c1b4a7e6SDavid S. Miller  *
1218c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1219c1b4a7e6SDavid S. Miller  */
12209e412ba7SIlpo Järvinen static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1221c1b4a7e6SDavid S. Miller {
12229e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
12236687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1224c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1225c1b4a7e6SDavid S. Miller 
1226c1b4a7e6SDavid S. Miller 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1227ae8064acSJohn Heffner 		goto send_now;
1228c1b4a7e6SDavid S. Miller 
12296687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1230ae8064acSJohn Heffner 		goto send_now;
1231ae8064acSJohn Heffner 
1232ae8064acSJohn Heffner 	/* Defer for less than two clock ticks. */
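	/* tp->tso_deferred packs two things into one word: bit 0 flags that
	 * a deferral is in progress, and the upper bits hold jiffies
	 * (shifted left by one) from when the deferral started.  The shifts
	 * below line the two timestamps up before comparing them.
	 */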
1233*bd515c3eSIlpo Järvinen 	if (tp->tso_deferred &&
1234*bd515c3eSIlpo Järvinen 	    ((jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1235ae8064acSJohn Heffner 		goto send_now;
1236908a75c1SDavid S. Miller 
1237c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1238c1b4a7e6SDavid S. Miller 
1239c1b4a7e6SDavid S. Miller 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1240c1b4a7e6SDavid S. Miller 	       (tp->snd_cwnd <= in_flight));
1241c1b4a7e6SDavid S. Miller 
1242c1b4a7e6SDavid S. Miller 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1243c1b4a7e6SDavid S. Miller 
1244c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1245c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1246c1b4a7e6SDavid S. Miller 
1247c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1248c1b4a7e6SDavid S. Miller 
1249ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1250ba244fe9SDavid S. Miller 	if (limit >= 65536)
1251ae8064acSJohn Heffner 		goto send_now;
1252ba244fe9SDavid S. Miller 
1253c1b4a7e6SDavid S. Miller 	if (sysctl_tcp_tso_win_divisor) {
1254c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1255c1b4a7e6SDavid S. Miller 
1256c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1257c1b4a7e6SDavid S. Miller 		 * just use it.
1258c1b4a7e6SDavid S. Miller 		 */
1259c1b4a7e6SDavid S. Miller 		chunk /= sysctl_tcp_tso_win_divisor;
1260c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1261ae8064acSJohn Heffner 			goto send_now;
1262c1b4a7e6SDavid S. Miller 	} else {
1263c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1264c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1265c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1266c1b4a7e6SDavid S. Miller 		 * then send now.
1267c1b4a7e6SDavid S. Miller 		 */
1268c1b4a7e6SDavid S. Miller 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1269ae8064acSJohn Heffner 			goto send_now;
1270c1b4a7e6SDavid S. Miller 	}
1271c1b4a7e6SDavid S. Miller 
1272c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1273ae8064acSJohn Heffner 	tp->tso_deferred = 1 | (jiffies<<1);
1274ae8064acSJohn Heffner 
1275c1b4a7e6SDavid S. Miller 	return 1;
1276ae8064acSJohn Heffner 
1277ae8064acSJohn Heffner send_now:
1278ae8064acSJohn Heffner 	tp->tso_deferred = 0;
1279ae8064acSJohn Heffner 	return 0;
1280c1b4a7e6SDavid S. Miller }
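
/* Illustrative reading of the heuristic above (assumed numbers): with
 * tcp_tso_win_divisor set to 3 and roughly 64 KB of window to play with,
 * the threshold works out to about 21 KB.  Deferral therefore only lasts
 * while less than ~21 KB (and less than a full 64 KB TSO frame) could be
 * sent right away; once that much space opens up, the skb goes out.
 */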
1281c1b4a7e6SDavid S. Miller 
12825d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
12835d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
12845d424d5aSJohn Heffner  *         1 if a probe was sent,
12855d424d5aSJohn Heffner  *         -1 otherwise */
12865d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
12875d424d5aSJohn Heffner {
12885d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
12895d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
12905d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
12915d424d5aSJohn Heffner 	int len;
12925d424d5aSJohn Heffner 	int probe_size;
129391cc17c0SIlpo Järvinen 	int size_needed;
12945d424d5aSJohn Heffner 	int copy;
12955d424d5aSJohn Heffner 	int mss_now;
12965d424d5aSJohn Heffner 
12975d424d5aSJohn Heffner 	/* Not currently probing/verifying,
12985d424d5aSJohn Heffner 	 * not in recovery,
12995d424d5aSJohn Heffner 	 * have enough cwnd, and
13005d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
13015d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
13025d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
13035d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
13045d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
13055d424d5aSJohn Heffner 	    tp->rx_opt.eff_sacks)
13065d424d5aSJohn Heffner 		return -1;
13075d424d5aSJohn Heffner 
13085d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
13095d424d5aSJohn Heffner 	mss_now = tcp_current_mss(sk, 0);
13105d424d5aSJohn Heffner 	probe_size = 2*tp->mss_cache;
131191cc17c0SIlpo Järvinen 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
13125d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
13135d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
13145d424d5aSJohn Heffner 		return -1;
13155d424d5aSJohn Heffner 	}
13165d424d5aSJohn Heffner 
13175d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
13187f9c33e5SIlpo Järvinen 	if (tp->write_seq - tp->snd_nxt < size_needed)
13195d424d5aSJohn Heffner 		return -1;
13205d424d5aSJohn Heffner 
132191cc17c0SIlpo Järvinen 	if (tp->snd_wnd < size_needed)
13225d424d5aSJohn Heffner 		return -1;
132391cc17c0SIlpo Järvinen 	if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd))
13245d424d5aSJohn Heffner 		return 0;
13255d424d5aSJohn Heffner 
1326d67c58e9SIlpo Järvinen 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1327d67c58e9SIlpo Järvinen 	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1328d67c58e9SIlpo Järvinen 		if (!tcp_packets_in_flight(tp))
13295d424d5aSJohn Heffner 			return -1;
13305d424d5aSJohn Heffner 		else
13315d424d5aSJohn Heffner 			return 0;
13325d424d5aSJohn Heffner 	}
13335d424d5aSJohn Heffner 
13345d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
13355d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
13365d424d5aSJohn Heffner 		return -1;
13375d424d5aSJohn Heffner 	sk_charge_skb(sk, nskb);
13385d424d5aSJohn Heffner 
1339fe067e8aSDavid S. Miller 	skb = tcp_send_head(sk);
13405d424d5aSJohn Heffner 
13415d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
13425d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
13435d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
13445d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
13455d424d5aSJohn Heffner 	nskb->csum = 0;
134684fa7933SPatrick McHardy 	nskb->ip_summed = skb->ip_summed;
13475d424d5aSJohn Heffner 
134850c4817eSIlpo Järvinen 	tcp_insert_write_queue_before(nskb, skb, sk);
134950c4817eSIlpo Järvinen 
13505d424d5aSJohn Heffner 	len = 0;
1351234b6860SIlpo Järvinen 	tcp_for_write_queue_from_safe(skb, next, sk) {
13525d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
13535d424d5aSJohn Heffner 		if (nskb->ip_summed)
13545d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
13555d424d5aSJohn Heffner 		else
13565d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
13575d424d5aSJohn Heffner 					 skb_put(nskb, copy), copy, nskb->csum);
13585d424d5aSJohn Heffner 
13595d424d5aSJohn Heffner 		if (skb->len <= copy) {
13605d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
13615d424d5aSJohn Heffner 			 * Throw it away. */
13625d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1363fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
13645d424d5aSJohn Heffner 			sk_stream_free_skb(sk, skb);
13655d424d5aSJohn Heffner 		} else {
13665d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
13675d424d5aSJohn Heffner 						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
13685d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
13695d424d5aSJohn Heffner 				skb_pull(skb, copy);
137084fa7933SPatrick McHardy 				if (skb->ip_summed != CHECKSUM_PARTIAL)
13715d424d5aSJohn Heffner 					skb->csum = csum_partial(skb->data, skb->len, 0);
13725d424d5aSJohn Heffner 			} else {
13735d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
13745d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
13755d424d5aSJohn Heffner 			}
13765d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
13775d424d5aSJohn Heffner 		}
13785d424d5aSJohn Heffner 
13795d424d5aSJohn Heffner 		len += copy;
1380234b6860SIlpo Järvinen 
1381234b6860SIlpo Järvinen 		if (len >= probe_size)
1382234b6860SIlpo Järvinen 			break;
13835d424d5aSJohn Heffner 	}
13845d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
13855d424d5aSJohn Heffner 
13865d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
13875d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
13885d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
13895d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
13905d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
13915d424d5aSJohn Heffner 		 * effectively two packets. */
13925d424d5aSJohn Heffner 		tp->snd_cwnd--;
13939e412ba7SIlpo Järvinen 		update_send_head(sk, nskb);
13945d424d5aSJohn Heffner 
13955d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
13960e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
13970e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
13985d424d5aSJohn Heffner 
13995d424d5aSJohn Heffner 		return 1;
14005d424d5aSJohn Heffner 	}
14015d424d5aSJohn Heffner 
14025d424d5aSJohn Heffner 	return -1;
14035d424d5aSJohn Heffner }
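
/* Rough illustration of the probe sizing above (assumed numbers, not from
 * the source): with mss_cache = 1460 and reordering = 3, the probe covers
 * probe_size = 2 * 1460 = 2920 bytes, and size_needed = 2920 +
 * (3 + 1) * 1460 = 8760 bytes must already be queued and must fit within
 * the send window before the probe is actually attempted.
 */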
14045d424d5aSJohn Heffner 
14055d424d5aSJohn Heffner 
14061da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
14071da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
14081da177e4SLinus Torvalds  * window for us.
14091da177e4SLinus Torvalds  *
14101da177e4SLinus Torvalds  * Returns 1 if no segments are in flight and we have queued segments, but
14111da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
14121da177e4SLinus Torvalds  */
1413a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
14141da177e4SLinus Torvalds {
14151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
141692df7b51SDavid S. Miller 	struct sk_buff *skb;
1417c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1418c1b4a7e6SDavid S. Miller 	int cwnd_quota;
14195d424d5aSJohn Heffner 	int result;
14201da177e4SLinus Torvalds 
14211da177e4SLinus Torvalds 	/* If we are closed, the bytes will have to remain here.
14221da177e4SLinus Torvalds 	 * In time closedown will finish, we will empty the write queue, and all
14231da177e4SLinus Torvalds 	 * will be happy.
14241da177e4SLinus Torvalds 	 */
142592df7b51SDavid S. Miller 	if (unlikely(sk->sk_state == TCP_CLOSE))
142692df7b51SDavid S. Miller 		return 0;
142792df7b51SDavid S. Miller 
1428c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
14295d424d5aSJohn Heffner 
14305d424d5aSJohn Heffner 	/* Do MTU probing. */
14315d424d5aSJohn Heffner 	if ((result = tcp_mtu_probe(sk)) == 0) {
14325d424d5aSJohn Heffner 		return 0;
14335d424d5aSJohn Heffner 	} else if (result > 0) {
14345d424d5aSJohn Heffner 		sent_pkts = 1;
14355d424d5aSJohn Heffner 	}
14365d424d5aSJohn Heffner 
1437fe067e8aSDavid S. Miller 	while ((skb = tcp_send_head(sk))) {
1438c8ac3774SHerbert Xu 		unsigned int limit;
1439c8ac3774SHerbert Xu 
1440b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1441c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1442c1b4a7e6SDavid S. Miller 
1443b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1444b68e9f85SHerbert Xu 		if (!cwnd_quota)
1445b68e9f85SHerbert Xu 			break;
1446b68e9f85SHerbert Xu 
1447b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1448b68e9f85SHerbert Xu 			break;
1449b68e9f85SHerbert Xu 
1450c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1451aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1452aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1453aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1454aa93466bSDavid S. Miller 				break;
1455c1b4a7e6SDavid S. Miller 		} else {
14569e412ba7SIlpo Järvinen 			if (tcp_tso_should_defer(sk, skb))
1457aa93466bSDavid S. Miller 				break;
1458c1b4a7e6SDavid S. Miller 		}
1459aa93466bSDavid S. Miller 
1460c8ac3774SHerbert Xu 		limit = mss_now;
1461c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1462c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1463c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1464c1b4a7e6SDavid S. Miller 
1465c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1466c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1467c1b4a7e6SDavid S. Miller 
1468c1b4a7e6SDavid S. Miller 				if (trim)
1469c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1470c1b4a7e6SDavid S. Miller 			}
1471c1b4a7e6SDavid S. Miller 		}
1472c8ac3774SHerbert Xu 
1473c8ac3774SHerbert Xu 		if (skb->len > limit &&
1474c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
14751da177e4SLinus Torvalds 			break;
14761da177e4SLinus Torvalds 
14771da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1478c1b4a7e6SDavid S. Miller 
1479dfb4b9dcSDavid S. Miller 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
14801da177e4SLinus Torvalds 			break;
14811da177e4SLinus Torvalds 
14821da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
14831da177e4SLinus Torvalds 		 * This call will increment packets_out.
14841da177e4SLinus Torvalds 		 */
14859e412ba7SIlpo Järvinen 		update_send_head(sk, skb);
14861da177e4SLinus Torvalds 
14871da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1488aa93466bSDavid S. Miller 		sent_pkts++;
14891da177e4SLinus Torvalds 	}
14901da177e4SLinus Torvalds 
1491aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
14929e412ba7SIlpo Järvinen 		tcp_cwnd_validate(sk);
14931da177e4SLinus Torvalds 		return 0;
14941da177e4SLinus Torvalds 	}
1495fe067e8aSDavid S. Miller 	return !tp->packets_out && tcp_send_head(sk);
14961da177e4SLinus Torvalds }
14971da177e4SLinus Torvalds 
1498a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1499a762a980SDavid S. Miller  * TCP_CORK or an attempt at coalescing tiny packets.
1500a762a980SDavid S. Miller  * The socket must be locked by the caller.
1501a762a980SDavid S. Miller  */
15029e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
15039e412ba7SIlpo Järvinen 			       int nonagle)
1504a762a980SDavid S. Miller {
1505fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1506a762a980SDavid S. Miller 
1507a762a980SDavid S. Miller 	if (skb) {
150855c97f3eSDavid S. Miller 		if (tcp_write_xmit(sk, cur_mss, nonagle))
15099e412ba7SIlpo Järvinen 			tcp_check_probe_timer(sk);
1510a762a980SDavid S. Miller 	}
1511a762a980SDavid S. Miller }
1512a762a980SDavid S. Miller 
1513c1b4a7e6SDavid S. Miller /* Send the _single_ skb sitting at the send head.  A true push of pending
1514c1b4a7e6SDavid S. Miller  * frames is required to set up the probe timer etc.
1515c1b4a7e6SDavid S. Miller  */
1516c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1517c1b4a7e6SDavid S. Miller {
1518c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1519fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_send_head(sk);
1520c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, cwnd_quota;
1521c1b4a7e6SDavid S. Miller 
1522c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1523c1b4a7e6SDavid S. Miller 
1524846998aeSDavid S. Miller 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1525c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1526c1b4a7e6SDavid S. Miller 
1527c1b4a7e6SDavid S. Miller 	if (likely(cwnd_quota)) {
1528c8ac3774SHerbert Xu 		unsigned int limit;
1529c8ac3774SHerbert Xu 
1530c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1531c1b4a7e6SDavid S. Miller 
1532c8ac3774SHerbert Xu 		limit = mss_now;
1533c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1534c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1535c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1536c1b4a7e6SDavid S. Miller 
1537c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1538c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1539c1b4a7e6SDavid S. Miller 
1540c1b4a7e6SDavid S. Miller 				if (trim)
1541c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1542c1b4a7e6SDavid S. Miller 			}
1543c1b4a7e6SDavid S. Miller 		}
1544c8ac3774SHerbert Xu 
1545c8ac3774SHerbert Xu 		if (skb->len > limit &&
1546c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1547c1b4a7e6SDavid S. Miller 			return;
1548c1b4a7e6SDavid S. Miller 
1549c1b4a7e6SDavid S. Miller 		/* Send it out now. */
1550c1b4a7e6SDavid S. Miller 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1551c1b4a7e6SDavid S. Miller 
1552dfb4b9dcSDavid S. Miller 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
15539e412ba7SIlpo Järvinen 			update_send_head(sk, skb);
15549e412ba7SIlpo Järvinen 			tcp_cwnd_validate(sk);
1555c1b4a7e6SDavid S. Miller 			return;
1556c1b4a7e6SDavid S. Miller 		}
1557c1b4a7e6SDavid S. Miller 	}
1558c1b4a7e6SDavid S. Miller }
1559c1b4a7e6SDavid S. Miller 
15601da177e4SLinus Torvalds /* This function returns the amount that we can raise the
15611da177e4SLinus Torvalds  * usable window based on the following constraints
15621da177e4SLinus Torvalds  *
15631da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
15641da177e4SLinus Torvalds  * 2. We limit memory per socket
15651da177e4SLinus Torvalds  *
15661da177e4SLinus Torvalds  * RFC 1122:
15671da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
15681da177e4SLinus Torvalds  *  RCV.NXT + RCV.WND fixed until:
15691da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
15701da177e4SLinus Torvalds  *
15711da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
15721da177e4SLinus Torvalds  * it at least MSS bytes.
15731da177e4SLinus Torvalds  *
15741da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
15751da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
15761da177e4SLinus Torvalds  *
15771da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
15781da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
15791da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
15801da177e4SLinus Torvalds  * window to always advance by a single byte.
15811da177e4SLinus Torvalds  *
15821da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
15831da177e4SLinus Torvalds  * then this will not be a problem.
15841da177e4SLinus Torvalds  *
15851da177e4SLinus Torvalds  * BSD seems to make the following compromise:
15861da177e4SLinus Torvalds  *
15871da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
15881da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
15891da177e4SLinus Torvalds  *	then set the window to 0.
15901da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
15911da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
15921da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
15931da177e4SLinus Torvalds  *
15941da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
15951da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
15961da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
15971da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
15981da177e4SLinus Torvalds  * because the pipeline is full.
15991da177e4SLinus Torvalds  *
16001da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
16011da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
16021da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
16031da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
16041da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
16051da177e4SLinus Torvalds  *
16061da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
16071da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
16081da177e4SLinus Torvalds  *
16091da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
16101da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
16111da177e4SLinus Torvalds  */
16121da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
16131da177e4SLinus Torvalds {
1614463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
16151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1616caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
16171da177e4SLinus Torvalds 	 * here.  I don't know if a value based on our guess
16181da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
16191da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
16201da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
16211da177e4SLinus Torvalds 	 */
1622463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
16231da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
16241da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
16251da177e4SLinus Torvalds 	int window;
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds 	if (mss > full_space)
16281da177e4SLinus Torvalds 		mss = full_space;
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 	if (free_space < full_space/2) {
1631463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds 		if (tcp_memory_pressure)
16341da177e4SLinus Torvalds 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds 		if (free_space < mss)
16371da177e4SLinus Torvalds 			return 0;
16381da177e4SLinus Torvalds 	}
16391da177e4SLinus Torvalds 
16401da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
16411da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
16421da177e4SLinus Torvalds 
16431da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
16441da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
16451da177e4SLinus Torvalds 	 */
16461da177e4SLinus Torvalds 	window = tp->rcv_wnd;
16471da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
16481da177e4SLinus Torvalds 		window = free_space;
16491da177e4SLinus Torvalds 
16501da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
16511da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
16521da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
16531da177e4SLinus Torvalds 		 */
16541da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
16551da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
16561da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
16571da177e4SLinus Torvalds 	} else {
16581da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
16591da177e4SLinus Torvalds 		 * Window clamp already applied above.
16601da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
16611da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
16621da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
16631da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
16641da177e4SLinus Torvalds 		 * is too small.
16651da177e4SLinus Torvalds 		 */
16661da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
16671da177e4SLinus Torvalds 			window = (free_space/mss)*mss;
166884565070SJohn Heffner 		else if (mss == full_space &&
166984565070SJohn Heffner 			 free_space > window + full_space/2)
167084565070SJohn Heffner 			window = free_space;
16711da177e4SLinus Torvalds 	}
16721da177e4SLinus Torvalds 
16731da177e4SLinus Torvalds 	return window;
16741da177e4SLinus Torvalds }
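
/* Worked example of the rounding above (illustrative values only): without
 * window scaling, mss = 1460 and free_space = 10000 give
 * window = (10000 / 1460) * 1460 = 8760, unless the currently offered
 * window is already within one mss of free_space, in which case it is kept
 * as-is to avoid the divide and multiply on every ACK.
 */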
16751da177e4SLinus Torvalds 
16761da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKBs during retransmission. */
16771da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
16781da177e4SLinus Torvalds {
16791da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1680fe067e8aSDavid S. Miller 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
16811da177e4SLinus Torvalds 
16821da177e4SLinus Torvalds 	/* The first test we must make is that neither of these two
16831da177e4SLinus Torvalds 	 * SKBs is still referenced by someone else.
16841da177e4SLinus Torvalds 	 */
16851da177e4SLinus Torvalds 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
16861da177e4SLinus Torvalds 		int skb_size = skb->len, next_skb_size = next_skb->len;
16871da177e4SLinus Torvalds 		u16 flags = TCP_SKB_CB(skb)->flags;
16881da177e4SLinus Torvalds 
16891da177e4SLinus Torvalds 		/* Also punt if next skb has been SACK'd. */
16901da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
16911da177e4SLinus Torvalds 			return;
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds 		/* Next skb is out of window. */
16941da177e4SLinus Torvalds 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
16951da177e4SLinus Torvalds 			return;
16961da177e4SLinus Torvalds 
16971da177e4SLinus Torvalds 		/* Punt if not enough space exists in the first SKB for
16981da177e4SLinus Torvalds 		 * the data in the second, or the total combined payload
16991da177e4SLinus Torvalds 		 * would exceed the MSS.
17001da177e4SLinus Torvalds 		 */
17011da177e4SLinus Torvalds 		if ((next_skb_size > skb_tailroom(skb)) ||
17021da177e4SLinus Torvalds 		    ((skb_size + next_skb_size) > mss_now))
17031da177e4SLinus Torvalds 			return;
17041da177e4SLinus Torvalds 
17051da177e4SLinus Torvalds 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
17061da177e4SLinus Torvalds 		       tcp_skb_pcount(next_skb) != 1);
17071da177e4SLinus Torvalds 
17086859d494SIlpo Järvinen 		tcp_highest_sack_combine(sk, next_skb, skb);
1709a6963a6bSIlpo Järvinen 
17101da177e4SLinus Torvalds 		/* Ok.	We will be able to collapse the packet. */
1711fe067e8aSDavid S. Miller 		tcp_unlink_write_queue(next_skb, sk);
17121da177e4SLinus Torvalds 
17131a4e2d09SArnaldo Carvalho de Melo 		skb_copy_from_linear_data(next_skb,
17141a4e2d09SArnaldo Carvalho de Melo 					  skb_put(skb, next_skb_size),
17151a4e2d09SArnaldo Carvalho de Melo 					  next_skb_size);
17161da177e4SLinus Torvalds 
171752d570aaSJarek Poplawski 		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
171852d570aaSJarek Poplawski 			skb->ip_summed = CHECKSUM_PARTIAL;
17191da177e4SLinus Torvalds 
172084fa7933SPatrick McHardy 		if (skb->ip_summed != CHECKSUM_PARTIAL)
17211da177e4SLinus Torvalds 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
17221da177e4SLinus Torvalds 
17231da177e4SLinus Torvalds 		/* Update sequence range on original skb. */
17241da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds 		/* Merge over control information. */
17271da177e4SLinus Torvalds 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
17281da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = flags;
17291da177e4SLinus Torvalds 
17301da177e4SLinus Torvalds 		/* All done, get rid of second SKB and account for it so
17311da177e4SLinus Torvalds 		 * packet counting does not break.
17321da177e4SLinus Torvalds 		 */
17331da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
17341da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
17351da177e4SLinus Torvalds 			tp->retrans_out -= tcp_skb_pcount(next_skb);
1736b5860bbaSIlpo Järvinen 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
17371da177e4SLinus Torvalds 			tp->lost_out -= tcp_skb_pcount(next_skb);
17381da177e4SLinus Torvalds 		/* Reno case is special. Sigh... */
1739e60402d0SIlpo Järvinen 		if (tcp_is_reno(tp) && tp->sacked_out)
17401da177e4SLinus Torvalds 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
17411da177e4SLinus Torvalds 
1742a47e5a98SIlpo Järvinen 		tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
1743e9144bd8SIlpo Järvinen 		tp->packets_out -= tcp_skb_pcount(next_skb);
1744b7689205SIlpo Järvinen 
1745b7689205SIlpo Järvinen 		/* changed transmit queue under us so clear hints */
1746b7689205SIlpo Järvinen 		tcp_clear_retrans_hints_partial(tp);
1747b7689205SIlpo Järvinen 
17481da177e4SLinus Torvalds 		sk_stream_free_skb(sk, next_skb);
17491da177e4SLinus Torvalds 	}
17501da177e4SLinus Torvalds }
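
/* Small illustration of when the collapse above applies: after the MSS
 * drops (e.g. following a path MTU decrease) the retransmit queue may hold
 * many short segments.  Two adjacent, uncloned segments that have not been
 * SACKed, of say 500 bytes each, can be merged into a single 1000-byte
 * retransmission as long as the combined payload still fits in one current
 * MSS and in the first skb's tailroom.
 */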
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in
17531da177e4SLinus Torvalds  * tcp_timer. This is used for path MTU discovery.
17541da177e4SLinus Torvalds  * The socket is already locked here.
17551da177e4SLinus Torvalds  */
17561da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk)
17571da177e4SLinus Torvalds {
17586687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
17591da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17601da177e4SLinus Torvalds 	struct sk_buff *skb;
17611da177e4SLinus Torvalds 	unsigned int mss = tcp_current_mss(sk, 0);
17621da177e4SLinus Torvalds 	int lost = 0;
17631da177e4SLinus Torvalds 
1764fe067e8aSDavid S. Miller 	tcp_for_write_queue(skb, sk) {
1765fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
1766fe067e8aSDavid S. Miller 			break;
17671da177e4SLinus Torvalds 		if (skb->len > mss &&
17681da177e4SLinus Torvalds 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
17691da177e4SLinus Torvalds 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
17701da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
17711da177e4SLinus Torvalds 				tp->retrans_out -= tcp_skb_pcount(skb);
17721da177e4SLinus Torvalds 			}
17731da177e4SLinus Torvalds 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
17741da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
17751da177e4SLinus Torvalds 				tp->lost_out += tcp_skb_pcount(skb);
17761da177e4SLinus Torvalds 				lost = 1;
17771da177e4SLinus Torvalds 			}
17781da177e4SLinus Torvalds 		}
17791da177e4SLinus Torvalds 	}
17801da177e4SLinus Torvalds 
17815af4ec23SIlpo Järvinen 	tcp_clear_all_retrans_hints(tp);
17826a438bbeSStephen Hemminger 
17831da177e4SLinus Torvalds 	if (!lost)
17841da177e4SLinus Torvalds 		return;
17851da177e4SLinus Torvalds 
1786005903bcSIlpo Järvinen 	tcp_verify_left_out(tp);
17871da177e4SLinus Torvalds 
17881da177e4SLinus Torvalds 	/* Don't muck with the congestion window here.
17891da177e4SLinus Torvalds 	 * The reason is that we do not increase the amount of _data_
17901da177e4SLinus Torvalds 	 * in the network; only the units changed, and the effective
17911da177e4SLinus Torvalds 	 * cwnd/ssthresh really are reduced now.
17921da177e4SLinus Torvalds 	 */
17936687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
17941da177e4SLinus Torvalds 		tp->high_seq = tp->snd_nxt;
17956687e988SArnaldo Carvalho de Melo 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
17961da177e4SLinus Torvalds 		tp->prior_ssthresh = 0;
17971da177e4SLinus Torvalds 		tp->undo_marker = 0;
17986687e988SArnaldo Carvalho de Melo 		tcp_set_ca_state(sk, TCP_CA_Loss);
17991da177e4SLinus Torvalds 	}
18001da177e4SLinus Torvalds 	tcp_xmit_retransmit_queue(sk);
18011da177e4SLinus Torvalds }
18021da177e4SLinus Torvalds 
18031da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
18041da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
18051da177e4SLinus Torvalds  * error occurred which prevented the send.
18061da177e4SLinus Torvalds  */
18071da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
18081da177e4SLinus Torvalds {
18091da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18105d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
18111da177e4SLinus Torvalds 	unsigned int cur_mss = tcp_current_mss(sk, 0);
18121da177e4SLinus Torvalds 	int err;
18131da177e4SLinus Torvalds 
18145d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
18155d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
18165d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
18175d424d5aSJohn Heffner 	}
18185d424d5aSJohn Heffner 
18191da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
1820caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
18211da177e4SLinus Torvalds 	 */
18221da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
18231da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
18241da177e4SLinus Torvalds 		return -EAGAIN;
18251da177e4SLinus Torvalds 
18261da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
18271da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
18281da177e4SLinus Torvalds 			BUG();
18291da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
18301da177e4SLinus Torvalds 			return -ENOMEM;
18311da177e4SLinus Torvalds 	}
18321da177e4SLinus Torvalds 
18331da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and skb is out of
18341da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
18351da177e4SLinus Torvalds 	 * case when the window is shrunk to zero, in which case
18361da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
18371da177e4SLinus Torvalds 	 */
18381da177e4SLinus Torvalds 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
18391da177e4SLinus Torvalds 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
18401da177e4SLinus Torvalds 		return -EAGAIN;
18411da177e4SLinus Torvalds 
18421da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
1843846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
18441da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
18451da177e4SLinus Torvalds 	}
18461da177e4SLinus Torvalds 
18471da177e4SLinus Torvalds 	/* Collapse two adjacent packets if worthwhile and we can. */
18481da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
18491da177e4SLinus Torvalds 	    (skb->len < (cur_mss >> 1)) &&
1850fe067e8aSDavid S. Miller 	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1851fe067e8aSDavid S. Miller 	    (!tcp_skb_is_last(sk, skb)) &&
1852fe067e8aSDavid S. Miller 	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1853fe067e8aSDavid S. Miller 	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
18541da177e4SLinus Torvalds 	    (sysctl_tcp_retrans_collapse != 0))
18551da177e4SLinus Torvalds 		tcp_retrans_try_collapse(sk, skb, cur_mss);
18561da177e4SLinus Torvalds 
18578292a17aSArnaldo Carvalho de Melo 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
18581da177e4SLinus Torvalds 		return -EHOSTUNREACH; /* Routing failure or similar. */
18591da177e4SLinus Torvalds 
18601da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
18611da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
18621da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
18631da177e4SLinus Torvalds 	 */
18641da177e4SLinus Torvalds 	if (skb->len > 0 &&
18651da177e4SLinus Torvalds 	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
18661da177e4SLinus Torvalds 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
18671da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
18681da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
18697967168cSHerbert Xu 			skb_shinfo(skb)->gso_segs = 1;
18707967168cSHerbert Xu 			skb_shinfo(skb)->gso_size = 0;
18717967168cSHerbert Xu 			skb_shinfo(skb)->gso_type = 0;
18721da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
18731da177e4SLinus Torvalds 			skb->csum = 0;
18741da177e4SLinus Torvalds 		}
18751da177e4SLinus Torvalds 	}
18761da177e4SLinus Torvalds 
18771da177e4SLinus Torvalds 	/* Make a copy if the first transmission SKB clone we made
18781da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
18791da177e4SLinus Torvalds 	 */
18801da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
18811da177e4SLinus Torvalds 
1882dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
18831da177e4SLinus Torvalds 
18841da177e4SLinus Torvalds 	if (err == 0) {
18851da177e4SLinus Torvalds 		/* Update global TCP statistics. */
18861da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
18871da177e4SLinus Torvalds 
18881da177e4SLinus Torvalds 		tp->total_retrans++;
18891da177e4SLinus Torvalds 
18901da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
18911da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
18921da177e4SLinus Torvalds 			if (net_ratelimit())
18931da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
18941da177e4SLinus Torvalds 		}
18951da177e4SLinus Torvalds #endif
1896b08d6cb2SIlpo Järvinen 		if (!tp->retrans_out)
1897b08d6cb2SIlpo Järvinen 			tp->lost_retrans_low = tp->snd_nxt;
18981da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
18991da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
19001da177e4SLinus Torvalds 
19011da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
19021da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
19031da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
19041da177e4SLinus Torvalds 
19051da177e4SLinus Torvalds 		tp->undo_retrans++;
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
19081da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
19091da177e4SLinus Torvalds 		 */
19101da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
19111da177e4SLinus Torvalds 	}
19121da177e4SLinus Torvalds 	return err;
19131da177e4SLinus Torvalds }
19141da177e4SLinus Torvalds 
19151da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
19161da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
19171da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
19181da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
19191da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
19201da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
19211da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
19221da177e4SLinus Torvalds  */
19231da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
19241da177e4SLinus Torvalds {
19256687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
19261da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
19271da177e4SLinus Torvalds 	struct sk_buff *skb;
19286a438bbeSStephen Hemminger 	int packet_cnt;
19296a438bbeSStephen Hemminger 
19306a438bbeSStephen Hemminger 	if (tp->retransmit_skb_hint) {
19316a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
19326a438bbeSStephen Hemminger 		packet_cnt = tp->retransmit_cnt_hint;
19336a438bbeSStephen Hemminger 	} else {
1934fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
19356a438bbeSStephen Hemminger 		packet_cnt = 0;
19366a438bbeSStephen Hemminger 	}
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds 	/* First pass: retransmit lost packets. */
19396a438bbeSStephen Hemminger 	if (tp->lost_out) {
1940fe067e8aSDavid S. Miller 		tcp_for_write_queue_from(skb, sk) {
19411da177e4SLinus Torvalds 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
19421da177e4SLinus Torvalds 
1943fe067e8aSDavid S. Miller 			if (skb == tcp_send_head(sk))
1944fe067e8aSDavid S. Miller 				break;
19456a438bbeSStephen Hemminger 			/* we could do better than to assign each time */
19466a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
19476a438bbeSStephen Hemminger 			tp->retransmit_cnt_hint = packet_cnt;
19486a438bbeSStephen Hemminger 
19491da177e4SLinus Torvalds 			/* Assume this retransmit will generate
19501da177e4SLinus Torvalds 			 * only one packet for congestion window
19511da177e4SLinus Torvalds 			 * calculation purposes.  This works because
19521da177e4SLinus Torvalds 			 * tcp_retransmit_skb() will chop up the
19531da177e4SLinus Torvalds 			 * packet to be MSS sized and all the
19541da177e4SLinus Torvalds 			 * packet counting works out.
19551da177e4SLinus Torvalds 			 */
19561da177e4SLinus Torvalds 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
19571da177e4SLinus Torvalds 				return;
19581da177e4SLinus Torvalds 
19591da177e4SLinus Torvalds 			if (sacked & TCPCB_LOST) {
19601da177e4SLinus Torvalds 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
19616a438bbeSStephen Hemminger 					if (tcp_retransmit_skb(sk, skb)) {
19626a438bbeSStephen Hemminger 						tp->retransmit_skb_hint = NULL;
19631da177e4SLinus Torvalds 						return;
19646a438bbeSStephen Hemminger 					}
19656687e988SArnaldo Carvalho de Melo 					if (icsk->icsk_ca_state != TCP_CA_Loss)
19661da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
19671da177e4SLinus Torvalds 					else
19681da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
19691da177e4SLinus Torvalds 
1970fe067e8aSDavid S. Miller 					if (skb == tcp_write_queue_head(sk))
1971463c84b9SArnaldo Carvalho de Melo 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
19723f421baaSArnaldo Carvalho de Melo 									  inet_csk(sk)->icsk_rto,
19733f421baaSArnaldo Carvalho de Melo 									  TCP_RTO_MAX);
19741da177e4SLinus Torvalds 				}
19751da177e4SLinus Torvalds 
19766a438bbeSStephen Hemminger 				packet_cnt += tcp_skb_pcount(skb);
19776a438bbeSStephen Hemminger 				if (packet_cnt >= tp->lost_out)
19781da177e4SLinus Torvalds 					break;
19791da177e4SLinus Torvalds 			}
19801da177e4SLinus Torvalds 		}
19811da177e4SLinus Torvalds 	}
19821da177e4SLinus Torvalds 
19831da177e4SLinus Torvalds 	/* OK, demanded retransmission is finished. */
19841da177e4SLinus Torvalds 
19851da177e4SLinus Torvalds 	/* Forward retransmissions are possible only during Recovery. */
19866687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
19871da177e4SLinus Torvalds 		return;
19881da177e4SLinus Torvalds 
19891da177e4SLinus Torvalds 	/* No forward retransmissions in Reno are possible. */
1990e60402d0SIlpo Järvinen 	if (tcp_is_reno(tp))
19911da177e4SLinus Torvalds 		return;
19921da177e4SLinus Torvalds 
19931da177e4SLinus Torvalds 	/* Yeah, we have to make a difficult choice between forward transmission
19941da177e4SLinus Torvalds 	 * and retransmission... Both ways have their merits...
19951da177e4SLinus Torvalds 	 *
19961da177e4SLinus Torvalds 	 * For now we do not retransmit anything, while we have some new
1997539d243fSIlpo Järvinen 	 * segments to send. In the other cases, follow rule 3 for
1998539d243fSIlpo Järvinen 	 * NextSeg() specified in RFC3517.
19991da177e4SLinus Torvalds 	 */
20001da177e4SLinus Torvalds 
20019e412ba7SIlpo Järvinen 	if (tcp_may_send_now(sk))
20021da177e4SLinus Torvalds 		return;
20031da177e4SLinus Torvalds 
2004539d243fSIlpo Järvinen 	/* If nothing is SACKed, highest_sack in the loop won't be valid */
2005539d243fSIlpo Järvinen 	if (!tp->sacked_out)
2006539d243fSIlpo Järvinen 		return;
2007539d243fSIlpo Järvinen 
2008539d243fSIlpo Järvinen 	if (tp->forward_skb_hint)
20096a438bbeSStephen Hemminger 		skb = tp->forward_skb_hint;
2010539d243fSIlpo Järvinen 	else
2011fe067e8aSDavid S. Miller 		skb = tcp_write_queue_head(sk);
20121da177e4SLinus Torvalds 
2013fe067e8aSDavid S. Miller 	tcp_for_write_queue_from(skb, sk) {
2014fe067e8aSDavid S. Miller 		if (skb == tcp_send_head(sk))
2015fe067e8aSDavid S. Miller 			break;
20166a438bbeSStephen Hemminger 		tp->forward_skb_hint = skb;
20176a438bbeSStephen Hemminger 
20186859d494SIlpo Järvinen 		if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
20191da177e4SLinus Torvalds 			break;
20201da177e4SLinus Torvalds 
20211da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
20221da177e4SLinus Torvalds 			break;
20231da177e4SLinus Torvalds 
20241da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
20251da177e4SLinus Torvalds 			continue;
20261da177e4SLinus Torvalds 
20271da177e4SLinus Torvalds 		/* Ok, retransmit it. */
20286a438bbeSStephen Hemminger 		if (tcp_retransmit_skb(sk, skb)) {
20296a438bbeSStephen Hemminger 			tp->forward_skb_hint = NULL;
20301da177e4SLinus Torvalds 			break;
20316a438bbeSStephen Hemminger 		}
20321da177e4SLinus Torvalds 
2033fe067e8aSDavid S. Miller 		if (skb == tcp_write_queue_head(sk))
20343f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
20353f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
20363f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
20371da177e4SLinus Torvalds 
20381da177e4SLinus Torvalds 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
20391da177e4SLinus Torvalds 	}
20401da177e4SLinus Torvalds }
20411da177e4SLinus Torvalds 
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds /* Send a FIN.  The caller locks the socket for us.  This cannot be
20441da177e4SLinus Torvalds  * allowed to fail to queue a FIN frame under any circumstances.
20451da177e4SLinus Torvalds  */
20461da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
20471da177e4SLinus Torvalds {
20481da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
2049fe067e8aSDavid S. Miller 	struct sk_buff *skb = tcp_write_queue_tail(sk);
20501da177e4SLinus Torvalds 	int mss_now;
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds 	/* Optimization: tack the FIN onto the last frame if we have a queue
20531da177e4SLinus Torvalds 	 * of unsent frames.  But be careful about outgoing SACKs
20541da177e4SLinus Torvalds 	 * and IP options.
20551da177e4SLinus Torvalds 	 */
20561da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, 1);
20571da177e4SLinus Torvalds 
2058fe067e8aSDavid S. Miller 	if (tcp_send_head(sk) != NULL) {
20591da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
20601da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
20611da177e4SLinus Torvalds 		tp->write_seq++;
20621da177e4SLinus Torvalds 	} else {
20631da177e4SLinus Torvalds 		/* The socket is locked; keep trying until memory is available. */
20641da177e4SLinus Torvalds 		for (;;) {
2065d179cd12SDavid S. Miller 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
20661da177e4SLinus Torvalds 			if (skb)
20671da177e4SLinus Torvalds 				break;
20681da177e4SLinus Torvalds 			yield();
20691da177e4SLinus Torvalds 		}
20701da177e4SLinus Torvalds 
20711da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
20721da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
20731da177e4SLinus Torvalds 		skb->csum = 0;
20741da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
20751da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked = 0;
20767967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 1;
20777967168cSHerbert Xu 		skb_shinfo(skb)->gso_size = 0;
20787967168cSHerbert Xu 		skb_shinfo(skb)->gso_type = 0;
20791da177e4SLinus Torvalds 
20801da177e4SLinus Torvalds 		/* A FIN consumes a sequence number; write_seq is advanced by tcp_queue_skb(). */
20811da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->seq = tp->write_seq;
20821da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
20831da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
20841da177e4SLinus Torvalds 	}
20859e412ba7SIlpo Järvinen 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
20861da177e4SLinus Torvalds }
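/* (Typical call path, as a sketch: tcp_close() or tcp_shutdown() in tcp.c
 * take the socket lock and call tcp_send_fin() when the write side is shut
 * down, which is why the blocking allocation loop above is acceptable.)
 */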
20871da177e4SLinus Torvalds 
20881da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
20891da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
20901da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
209165bb723cSGerrit Renker  * by RFC 2525, section 2.17.  -DaveM
20921da177e4SLinus Torvalds  */
2093dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
20941da177e4SLinus Torvalds {
20951da177e4SLinus Torvalds 	struct sk_buff *skb;
20961da177e4SLinus Torvalds 
20971da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
20981da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
20991da177e4SLinus Torvalds 	if (!skb) {
21001da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
21011da177e4SLinus Torvalds 		return;
21021da177e4SLinus Torvalds 	}
21031da177e4SLinus Torvalds 
21041da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
21051da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
21061da177e4SLinus Torvalds 	skb->csum = 0;
21071da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
21081da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
21097967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
21107967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
21117967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds 	/* Send it off. */
21149e412ba7SIlpo Järvinen 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
21151da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
21161da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2117dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
21181da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
21191da177e4SLinus Torvalds }
21201da177e4SLinus Torvalds 
21211da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent
21221da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
21231da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
21241da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
21251da177e4SLinus Torvalds  */
21261da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
21271da177e4SLinus Torvalds {
21281da177e4SLinus Torvalds 	struct sk_buff* skb;
21291da177e4SLinus Torvalds 
2130fe067e8aSDavid S. Miller 	skb = tcp_write_queue_head(sk);
21311da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
21321da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
21331da177e4SLinus Torvalds 		return -EFAULT;
21341da177e4SLinus Torvalds 	}
21351da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
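		/* The queued SYN may still be cloned (e.g. a copy is held for
		 * retransmission), so take a private copy before setting the
		 * ACK bit on it.
		 */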
21361da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
21371da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
21381da177e4SLinus Torvalds 			if (nskb == NULL)
21391da177e4SLinus Torvalds 				return -ENOMEM;
2140fe067e8aSDavid S. Miller 			tcp_unlink_write_queue(skb, sk);
21411da177e4SLinus Torvalds 			skb_header_release(nskb);
2142fe067e8aSDavid S. Miller 			__tcp_add_write_queue_head(sk, nskb);
21431da177e4SLinus Torvalds 			sk_stream_free_skb(sk, skb);
21441da177e4SLinus Torvalds 			sk_charge_skb(sk, nskb);
21451da177e4SLinus Torvalds 			skb = nskb;
21461da177e4SLinus Torvalds 		}
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
21491da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
21501da177e4SLinus Torvalds 	}
21511da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2152dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
21531da177e4SLinus Torvalds }
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds /*
21561da177e4SLinus Torvalds  * Prepare a SYN-ACK.
21571da177e4SLinus Torvalds  */
21581da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
215960236fddSArnaldo Carvalho de Melo 				 struct request_sock *req)
21601da177e4SLinus Torvalds {
21612e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
21621da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21631da177e4SLinus Torvalds 	struct tcphdr *th;
21641da177e4SLinus Torvalds 	int tcp_header_size;
21651da177e4SLinus Torvalds 	struct sk_buff *skb;
2166cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2167cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_key *md5;
2168cfb6eeb4SYOSHIFUJI Hideaki 	__u8 *md5_hash_location;
2169cfb6eeb4SYOSHIFUJI Hideaki #endif
21701da177e4SLinus Torvalds 
21711da177e4SLinus Torvalds 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
21721da177e4SLinus Torvalds 	if (skb == NULL)
21731da177e4SLinus Torvalds 		return NULL;
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds 	/* Reserve space for headers. */
21761da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
21771da177e4SLinus Torvalds 
21781da177e4SLinus Torvalds 	skb->dst = dst_clone(dst);
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
21812e6599cbSArnaldo Carvalho de Melo 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
21822e6599cbSArnaldo Carvalho de Melo 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
21831da177e4SLinus Torvalds 			   /* SACK_PERM is in the place of NOP NOP of TS */
21842e6599cbSArnaldo Carvalho de Melo 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
2185cfb6eeb4SYOSHIFUJI Hideaki 
2186cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2187cfb6eeb4SYOSHIFUJI Hideaki 	/* Are we doing MD5 on this segment?  If so, make room for it. */
2188cfb6eeb4SYOSHIFUJI Hideaki 	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
2189cfb6eeb4SYOSHIFUJI Hideaki 	if (md5)
2190cfb6eeb4SYOSHIFUJI Hideaki 		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
2191cfb6eeb4SYOSHIFUJI Hideaki #endif
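	/* (For reference, with the usual option sizes - TCPOLEN_MSS is 4 and
	 * the aligned timestamp, wscale, SACK-permitted and MD5 options are
	 * 12, 4, 4 and 20 bytes - a SYN-ACK with timestamps and window
	 * scaling carries a 20 + 4 + 12 + 4 = 40 byte TCP header.)
	 */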
2192aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
2193aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
21941da177e4SLinus Torvalds 
2195aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
21961da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
21971da177e4SLinus Torvalds 	th->syn = 1;
21981da177e4SLinus Torvalds 	th->ack = 1;
21991da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
22001da177e4SLinus Torvalds 	th->source = inet_sk(sk)->sport;
22012e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
22022e6599cbSArnaldo Carvalho de Melo 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
22031da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
22041da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
22057967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
22067967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
22077967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
22081da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
22092e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
22101da177e4SLinus Torvalds 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
22111da177e4SLinus Torvalds 		__u8 rcv_wscale;
22121da177e4SLinus Torvalds 		/* Set this up on the first call only */
22131da177e4SLinus Torvalds 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
22141da177e4SLinus Torvalds 		/* Use tcp_full_space() because this is guaranteed to be the first packet. */
22151da177e4SLinus Torvalds 		tcp_select_initial_window(tcp_full_space(sk),
22162e6599cbSArnaldo Carvalho de Melo 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
22171da177e4SLinus Torvalds 			&req->rcv_wnd,
22181da177e4SLinus Torvalds 			&req->window_clamp,
22192e6599cbSArnaldo Carvalho de Melo 			ireq->wscale_ok,
22201da177e4SLinus Torvalds 			&rcv_wscale);
22212e6599cbSArnaldo Carvalho de Melo 		ireq->rcv_wscale = rcv_wscale;
22221da177e4SLinus Torvalds 	}
22231da177e4SLinus Torvalds 
22241da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2225600ff0c2SIlpo Järvinen 	th->window = htons(min(req->rcv_wnd, 65535U));
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2228df7a3b07SAl Viro 	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
22292e6599cbSArnaldo Carvalho de Melo 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
22301da177e4SLinus Torvalds 			      TCP_SKB_CB(skb)->when,
2231cfb6eeb4SYOSHIFUJI Hideaki 			      req->ts_recent,
2232cfb6eeb4SYOSHIFUJI Hideaki 			      (
2233cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2234cfb6eeb4SYOSHIFUJI Hideaki 			       md5 ? &md5_hash_location :
2235cfb6eeb4SYOSHIFUJI Hideaki #endif
2236cfb6eeb4SYOSHIFUJI Hideaki 			       NULL)
2237cfb6eeb4SYOSHIFUJI Hideaki 			      );
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 	skb->csum = 0;
22401da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
22411da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
2242cfb6eeb4SYOSHIFUJI Hideaki 
2243cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2244cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need; compute the MD5 hash if needed. */
2245cfb6eeb4SYOSHIFUJI Hideaki 	if (md5) {
2246cfb6eeb4SYOSHIFUJI Hideaki 		tp->af_specific->calc_md5_hash(md5_hash_location,
2247cfb6eeb4SYOSHIFUJI Hideaki 					       md5,
2248cfb6eeb4SYOSHIFUJI Hideaki 					       NULL, dst, req,
2249aa8223c7SArnaldo Carvalho de Melo 					       tcp_hdr(skb), sk->sk_protocol,
2250cfb6eeb4SYOSHIFUJI Hideaki 					       skb->len);
2251cfb6eeb4SYOSHIFUJI Hideaki 	}
2252cfb6eeb4SYOSHIFUJI Hideaki #endif
2253cfb6eeb4SYOSHIFUJI Hideaki 
22541da177e4SLinus Torvalds 	return skb;
22551da177e4SLinus Torvalds }
22561da177e4SLinus Torvalds 
22571da177e4SLinus Torvalds /*
22581da177e4SLinus Torvalds  * Do all connect socket setups that can be done AF independent.
22591da177e4SLinus Torvalds  */
226040efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk)
22611da177e4SLinus Torvalds {
22621da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
22631da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22641da177e4SLinus Torvalds 	__u8 rcv_wscale;
22651da177e4SLinus Torvalds 
22661da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
22671da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
22681da177e4SLinus Torvalds 	 */
22691da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
22701da177e4SLinus Torvalds 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
22711da177e4SLinus Torvalds 
2272cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2273cfb6eeb4SYOSHIFUJI Hideaki 	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2274cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2275cfb6eeb4SYOSHIFUJI Hideaki #endif
2276cfb6eeb4SYOSHIFUJI Hideaki 
22771da177e4SLinus Torvalds 	/* If the user gave us a TCP_MAXSEG, record it as the MSS clamp. */
22781da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
22791da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
22801da177e4SLinus Torvalds 	tp->max_window = 0;
22815d424d5aSJohn Heffner 	tcp_mtup_init(sk);
22821da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
22831da177e4SLinus Torvalds 
22841da177e4SLinus Torvalds 	if (!tp->window_clamp)
22851da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
22861da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
22871da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
22881da177e4SLinus Torvalds 
22891da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
22901da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
22911da177e4SLinus Torvalds 				  &tp->rcv_wnd,
22921da177e4SLinus Torvalds 				  &tp->window_clamp,
22931da177e4SLinus Torvalds 				  sysctl_tcp_window_scaling,
22941da177e4SLinus Torvalds 				  &rcv_wscale);
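	/* (The second argument above is, roughly, the advertised MSS minus
	 * the per-segment space the timestamp option will occupy, so the
	 * initial window is sized in terms of actual payload per segment.)
	 */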
22951da177e4SLinus Torvalds 
22961da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
22971da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
22981da177e4SLinus Torvalds 
22991da177e4SLinus Torvalds 	sk->sk_err = 0;
23001da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
23011da177e4SLinus Torvalds 	tp->snd_wnd = 0;
23021da177e4SLinus Torvalds 	tcp_init_wl(tp, tp->write_seq, 0);
23031da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
23041da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
23051da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
23061da177e4SLinus Torvalds 	tp->rcv_wup = 0;
23071da177e4SLinus Torvalds 	tp->copied_seq = 0;
23081da177e4SLinus Torvalds 
2309463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2310463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
23111da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
23121da177e4SLinus Torvalds }
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds /*
23151da177e4SLinus Torvalds  * Build a SYN and send it off.
23161da177e4SLinus Torvalds  */
23171da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
23181da177e4SLinus Torvalds {
23191da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
23201da177e4SLinus Torvalds 	struct sk_buff *buff;
23211da177e4SLinus Torvalds 
23221da177e4SLinus Torvalds 	tcp_connect_init(sk);
23231da177e4SLinus Torvalds 
2324d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
23251da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
23261da177e4SLinus Torvalds 		return -ENOBUFS;
23271da177e4SLinus Torvalds 
23281da177e4SLinus Torvalds 	/* Reserve space for headers. */
23291da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
23301da177e4SLinus Torvalds 
23311da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
23329e412ba7SIlpo Järvinen 	TCP_ECN_send_syn(sk, buff);
23331da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->sacked = 0;
23347967168cSHerbert Xu 	skb_shinfo(buff)->gso_segs = 1;
23357967168cSHerbert Xu 	skb_shinfo(buff)->gso_size = 0;
23367967168cSHerbert Xu 	skb_shinfo(buff)->gso_type = 0;
23371da177e4SLinus Torvalds 	buff->csum = 0;
2338bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
23391da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
23401da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
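	/* Like a FIN, the SYN consumes one sequence number: seq is the old
	 * write_seq and end_seq is one past it.
	 */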
23411da177e4SLinus Torvalds 
23421da177e4SLinus Torvalds 	/* Send it off. */
23431da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
23441da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
23451da177e4SLinus Torvalds 	skb_header_release(buff);
2346fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, buff);
23471da177e4SLinus Torvalds 	sk_charge_skb(sk, buff);
23481da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
2349dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2350bd37a088SWei Yongjun 
2351bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2352bd37a088SWei Yongjun 	 * so that this packet gets counted in tcpOutSegs.
2353bd37a088SWei Yongjun 	 */
2354bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
2355bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
23561da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
23571da177e4SLinus Torvalds 
23581da177e4SLinus Torvalds 	/* Timer for repeating the SYN until we get an answer. */
23593f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
23603f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
23611da177e4SLinus Torvalds 	return 0;
23621da177e4SLinus Torvalds }
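/* (Sketch of the usual call path: connect() on a TCP socket reaches the
 * address-family connect routine, e.g. tcp_v4_connect(), which chooses the
 * route and initial sequence number and then calls tcp_connect() above to
 * build and transmit the SYN.)
 */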
23631da177e4SLinus Torvalds 
23641da177e4SLinus Torvalds /* Send out a delayed ACK; the caller does the policy checking
23651da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
23661da177e4SLinus Torvalds  * for details.
23671da177e4SLinus Torvalds  */
23681da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
23691da177e4SLinus Torvalds {
2370463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2371463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
23721da177e4SLinus Torvalds 	unsigned long timeout;
23731da177e4SLinus Torvalds 
23741da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2375463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
23761da177e4SLinus Torvalds 		int max_ato = HZ/2;
23771da177e4SLinus Torvalds 
2378463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
23791da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
23801da177e4SLinus Torvalds 
23811da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
23821da177e4SLinus Torvalds 
23831da177e4SLinus Torvalds 		/* If some RTT estimate is known, use it to bound the delayed ACK.
2384463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here; use the results of RTT
23851da177e4SLinus Torvalds 		 * measurements directly.
23861da177e4SLinus Torvalds 		 */
23871da177e4SLinus Torvalds 		if (tp->srtt) {
23881da177e4SLinus Torvalds 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
23891da177e4SLinus Torvalds 
23901da177e4SLinus Torvalds 			if (rtt < max_ato)
23911da177e4SLinus Torvalds 				max_ato = rtt;
23921da177e4SLinus Torvalds 		}
23931da177e4SLinus Torvalds 
23941da177e4SLinus Torvalds 		ato = min(ato, max_ato);
23951da177e4SLinus Torvalds 	}
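	/* (Rough example: with HZ=1000, TCP_DELACK_MIN is 40ms and max_ato
	 * starts at 500ms; with a smoothed RTT of 100ms the clamp above
	 * keeps ato at or below 100ms before the timer is armed.)
	 */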
23961da177e4SLinus Torvalds 
23971da177e4SLinus Torvalds 	/* Stay within the limit we were given */
23981da177e4SLinus Torvalds 	timeout = jiffies + ato;
23991da177e4SLinus Torvalds 
24001da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one set earlier. */
2401463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
24021da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
24031da177e4SLinus Torvalds 		 * send ACK now.
24041da177e4SLinus Torvalds 		 */
2405463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
2406463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
24071da177e4SLinus Torvalds 			tcp_send_ack(sk);
24081da177e4SLinus Torvalds 			return;
24091da177e4SLinus Torvalds 		}
24101da177e4SLinus Torvalds 
2411463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2412463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
24131da177e4SLinus Torvalds 	}
2414463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2415463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
2416463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
24171da177e4SLinus Torvalds }
24181da177e4SLinus Torvalds 
24191da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
24201da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
24211da177e4SLinus Torvalds {
24221da177e4SLinus Torvalds 	/* If we have been reset, we may not send again. */
24231da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
24241da177e4SLinus Torvalds 		struct sk_buff *buff;
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds 		/* We are not putting this on the write queue, so
24271da177e4SLinus Torvalds 		 * tcp_transmit_skb() will set the ownership to this
24281da177e4SLinus Torvalds 		 * sock.
24291da177e4SLinus Torvalds 		 */
24301da177e4SLinus Torvalds 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
24311da177e4SLinus Torvalds 		if (buff == NULL) {
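			/* Out of memory: fall back to scheduling a delayed
			 * ACK so the ACK is retried shortly rather than
			 * being dropped entirely.
			 */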
2432463c84b9SArnaldo Carvalho de Melo 			inet_csk_schedule_ack(sk);
2433463c84b9SArnaldo Carvalho de Melo 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
24343f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
24353f421baaSArnaldo Carvalho de Melo 						  TCP_DELACK_MAX, TCP_RTO_MAX);
24361da177e4SLinus Torvalds 			return;
24371da177e4SLinus Torvalds 		}
24381da177e4SLinus Torvalds 
24391da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
24401da177e4SLinus Torvalds 		skb_reserve(buff, MAX_TCP_HEADER);
24411da177e4SLinus Torvalds 		buff->csum = 0;
24421da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
24431da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->sacked = 0;
24447967168cSHerbert Xu 		skb_shinfo(buff)->gso_segs = 1;
24457967168cSHerbert Xu 		skb_shinfo(buff)->gso_size = 0;
24467967168cSHerbert Xu 		skb_shinfo(buff)->gso_type = 0;
24471da177e4SLinus Torvalds 
24481da177e4SLinus Torvalds 		/* Send it off; this clears delayed ACKs for us. */
24499e412ba7SIlpo Järvinen 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
24501da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2451dfb4b9dcSDavid S. Miller 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
24521da177e4SLinus Torvalds 	}
24531da177e4SLinus Torvalds }
24541da177e4SLinus Torvalds 
24551da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
24561da177e4SLinus Torvalds  * number. It assumes the other end will try to ACK it.
24571da177e4SLinus Torvalds  *
24581da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
24591da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
24601da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
24611da177e4SLinus Torvalds  *
24621da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
24631da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
24641da177e4SLinus Torvalds  * out-of-date one with SND.UNA-1 to probe the window.
24651da177e4SLinus Torvalds  */
24661da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
24671da177e4SLinus Torvalds {
24681da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
24691da177e4SLinus Torvalds 	struct sk_buff *skb;
24701da177e4SLinus Torvalds 
24711da177e4SLinus Torvalds 	/* We don't queue it; tcp_transmit_skb() sets the ownership. */
24721da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
24731da177e4SLinus Torvalds 	if (skb == NULL)
24741da177e4SLinus Torvalds 		return -1;
24751da177e4SLinus Torvalds 
24761da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
24771da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
24781da177e4SLinus Torvalds 	skb->csum = 0;
24791da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
24801da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = urgent;
24817967168cSHerbert Xu 	skb_shinfo(skb)->gso_segs = 1;
24827967168cSHerbert Xu 	skb_shinfo(skb)->gso_size = 0;
24837967168cSHerbert Xu 	skb_shinfo(skb)->gso_type = 0;
24841da177e4SLinus Torvalds 
24851da177e4SLinus Torvalds 	/* Use a previous sequence number.  This should cause the other
24861da177e4SLinus Torvalds 	 * end to send an ACK.  Don't queue or clone the SKB; just
24871da177e4SLinus Torvalds 	 * send it.
24881da177e4SLinus Torvalds 	 */
24891da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
24901da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
24911da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2492dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
24931da177e4SLinus Torvalds }
24941da177e4SLinus Torvalds 
24951da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
24961da177e4SLinus Torvalds {
24971da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
24981da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
24991da177e4SLinus Torvalds 		struct sk_buff *skb;
25001da177e4SLinus Torvalds 
2501fe067e8aSDavid S. Miller 		if ((skb = tcp_send_head(sk)) != NULL &&
25021da177e4SLinus Torvalds 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
25031da177e4SLinus Torvalds 			int err;
25041da177e4SLinus Torvalds 			unsigned int mss = tcp_current_mss(sk, 0);
25051da177e4SLinus Torvalds 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
25061da177e4SLinus Torvalds 
25071da177e4SLinus Torvalds 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
25081da177e4SLinus Torvalds 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
25091da177e4SLinus Torvalds 
25101da177e4SLinus Torvalds 			/* We are probing the opening of a window
25111da177e4SLinus Torvalds 			 * but the window size is != 0; this must have been
25121da177e4SLinus Torvalds 			 * the result of sender-side SWS avoidance.
25131da177e4SLinus Torvalds 			 */
25141da177e4SLinus Torvalds 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
25151da177e4SLinus Torvalds 			    skb->len > mss) {
25161da177e4SLinus Torvalds 				seg_size = min(seg_size, mss);
25171da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2518846998aeSDavid S. Miller 				if (tcp_fragment(sk, skb, seg_size, mss))
25191da177e4SLinus Torvalds 					return -1;
25201da177e4SLinus Torvalds 			} else if (!tcp_skb_pcount(skb))
2521846998aeSDavid S. Miller 				tcp_set_skb_tso_segs(sk, skb, mss);
25221da177e4SLinus Torvalds 
25231da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
25241da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2525dfb4b9dcSDavid S. Miller 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
25261da177e4SLinus Torvalds 			if (!err) {
25279e412ba7SIlpo Järvinen 				update_send_head(sk, skb);
25281da177e4SLinus Torvalds 			}
25291da177e4SLinus Torvalds 			return err;
25301da177e4SLinus Torvalds 		} else {
25311da177e4SLinus Torvalds 			if (tp->urg_mode &&
25321da177e4SLinus Torvalds 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
25331da177e4SLinus Torvalds 				tcp_xmit_probe_skb(sk, TCPCB_URG);
25341da177e4SLinus Torvalds 			return tcp_xmit_probe_skb(sk, 0);
25351da177e4SLinus Torvalds 		}
25361da177e4SLinus Torvalds 	}
25371da177e4SLinus Torvalds 	return -1;
25381da177e4SLinus Torvalds }
25391da177e4SLinus Torvalds 
25401da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
25411da177e4SLinus Torvalds  * send a partial packet; otherwise send a zero-window probe.
25421da177e4SLinus Torvalds  */
25431da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
25441da177e4SLinus Torvalds {
2545463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
25461da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
25471da177e4SLinus Torvalds 	int err;
25481da177e4SLinus Torvalds 
25491da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
25501da177e4SLinus Torvalds 
2551fe067e8aSDavid S. Miller 	if (tp->packets_out || !tcp_send_head(sk)) {
25521da177e4SLinus Torvalds 		/* Cancel the probe timer if it is not required. */
25536687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2554463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
25551da177e4SLinus Torvalds 		return;
25561da177e4SLinus Torvalds 	}
25571da177e4SLinus Torvalds 
25581da177e4SLinus Torvalds 	if (err <= 0) {
2559463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2560463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
25616687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2562463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
25633f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
25643f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25651da177e4SLinus Torvalds 	} else {
25661da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
25676687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
25681da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
25691da177e4SLinus Torvalds 		 *
25701da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
25711da177e4SLinus Torvalds 		 */
25726687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
25736687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2574463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2575463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
25763f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
25773f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
25781da177e4SLinus Torvalds 	}
25791da177e4SLinus Torvalds }
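/* (With the exponential backoff above, successive zero-window probes go out
 * roughly at icsk_rto, 2*icsk_rto, 4*icsk_rto, ... capped at TCP_RTO_MAX,
 * until the peer opens its window or the probe timer gives up.)
 */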
25801da177e4SLinus Torvalds 
25811da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
25821da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
25831da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
25841da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2585f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
25865d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
2587