xref: /linux/net/ipv4/tcp_output.c (revision dfb4b9dceb35c567a595ae5e9d035cfda044a103)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

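/* Advance sk_send_head past SKB, which has just been transmitted, update
 * snd_nxt to the end of its data, and account for the newly in-flight
 * packets.
 */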
static inline void update_send_head(struct sock *sk, struct tcp_sock *tp,
				    struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if the window was not shrunk.
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, and SND.NXT is out of
 * window. :-( Anything in between SND.UNA...SND.UNA+SND.WND can also
 * already be invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063 and draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement #3; we advertise an MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

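/* Illustrative example (values assumed, not from this code): if the cached
 * route carries RTAX_ADVMSS = 536 while tp->advmss is 1460, we advertise
 * and cache 536; if the metric is 1460 or larger, tp->advmss is unchanged.
 */
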
/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

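/* Illustrative example (assumed values): with snd_cwnd = 10,
 * restart_cwnd = 2 and an idle time of 3.5 * RTO, the loop above halves
 * cwnd once per elapsed RTO: 10 -> 5 -> 2, stopping at restart_cwnd.
 * The connection then restarts from snd_cwnd = 2.
 */
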
static inline void tcp_event_data_sent(struct tcp_sock *tp,
				       struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply sent within ATO of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize the space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value large enough for senders
	 * following RFC2414; senders that do not follow this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

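/* Illustrative example (assumed sysctl values): if
 * max(sysctl_tcp_rmem[2], sysctl_rmem_max) is 4 MB (4194304 bytes), the
 * loop above halves it until it fits in 16 bits: seven shifts bring it
 * to 32768 <= 65535, so rcv_wscale becomes 7. With mss = 1460 (> 1<<7),
 * init_cwnd stays 4 and rcv_wnd is capped at 4*1460 = 5840 bytes.
 */
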
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static __inline__ u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

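/* Illustrative example (assumed values): with rcv_wscale = 7 and a
 * selected window of 262144 bytes, the advertised field is
 * 262144 >> 7 = 2048; the receiver multiplies it back by 1 << 7.
 */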

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->rtt_sample)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
	skb->h.th = th;
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th->source		= inet->sport;
	th->dest		= inet->dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
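	/* Write the data offset (header length in 32-bit words), the
	 * reserved bits and the flag bits with a single 16-bit store;
	 * e.g. a 32-byte header gives (32 >> 2) << 12 = 8 << 12.
	 */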
	*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(tp->rcv_wnd);
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
		th->urg			= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__u32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent);
	} else {
		tcp_build_and_update_options((__u32 *)(th + 1),
					     tp, tcb->when);
		TCP_ECN_send(sk, tp, skb, tcp_header_size);
	}

	tp->af_specific->send_check(sk, th, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = tp->af_specific->queue_xmit(skb, 0);
	if (unlikely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	/* NET_XMIT_CN is special. It does not guarantee that this
	 * packet is lost. It says that the device is about to start
	 * dropping packets, or already drops some packets of the same
	 * priority, and invokes us to send less aggressively.
	 */
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer.
 *
 * NOTE: the probe0 timer is not checked; do not forget
 * tcp_push_pending_frames, otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

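/* Initialize the TSO state of an skb: set the segment count and
 * per-segment size so that the skb is either sent as a single segment
 * or carved into mss_now-sized pieces by the NIC.
 */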
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now ||
	    !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}

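/* Illustrative example (assumed values): an skb of 4300 bytes with
 * mss_now = 1460 gets tso_segs = (4300 + 1459) / 1460 = 3 and
 * tso_size = 1460, i.e. the hardware will emit three segments.
 */
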
/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */
	sk_charge_skb(sk, buff);

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' field of
	 * skbs which it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up the tso_factor for both the original and the new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			/* Adjust the Reno SACK estimate. */
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

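/* Illustrative example (assumed numbers): fragmenting an skb covering
 * sequence range [1000, 4000) at len = 1460 leaves the original skb as
 * [1000, 2460) and queues a new buff covering [2460, 4000) right after
 * it, with PSH/FIN carried only by the second piece.
 */
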
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
	return skb->tail;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	if (len <= skb_headlen(skb)) {
		__skb_pull(skb, len);
	} else {
		if (__pskb_trim_head(skb, len - skb_headlen(skb)) == NULL)
			return -ENOMEM;
	}

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of the tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* This function synchronizes snd mss to the current pmtu/exthdr set.

   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
   NOT account for TCP options, but includes only the bare TCP header.

   tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
   It is the minimum of user_mss and the mss received with SYN.
   It also does not include TCP options.

   tp->pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is the current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account the current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that the advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. tp->pmtu_cookie and tp->mss_cache are READ ONLY outside
   this function.			--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now;

	/* Calculate the base mss without TCP options:
	   it is MMS_S - sizeof(tcphdr) of rfc1122.
	 */
	mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= tp->ext_header_len;

	/* Then reserve room for a full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	/* Bound mss with half of the window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store the cached results */
	tp->pmtu_cookie = pmtu;
	tp->mss_cache = mss_now;

	return mss_now;
}

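/* Illustrative arithmetic (assumed values): for IPv4 with pmtu = 1500,
 * mss_now starts at 1500 - 20 - 20 = 1460; if timestamps are enabled
 * (tcp_header_len = 20 + 12), the final mss_now is 1460 - 12 = 1448.
 */
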
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account the rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed &&
	    (sk->sk_route_caps & NETIF_F_TSO) &&
	    !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != tp->pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = 65535 -
			tp->af_specific->net_header_len -
			tp->ext_header_len - tp->tcp_header_len;

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

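/* Illustrative example (assumed values, no max_window clamp): for IPv4
 * with no extension headers and tcp_header_len = 32, the TSO goal starts
 * at 65535 - 20 - 0 - 32 = 65483; with mss_now = 1448 the remainder
 * 65483 % 1448 = 323 is trimmed, giving xmit_size_goal = 65160, an
 * exact multiple (45 segments) of the mss.
 */
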
/* Congestion window validation. (RFC2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* The network is fully fed. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* The network is starved. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

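/* Illustrative example (assumed values): with snd_cwnd = 10 and 7
 * packets currently in flight, tcp_cwnd_test() returns a quota of 3
 * segments; with 10 or more in flight it returns 0.
 */
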
/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     skb_shinfo(skb)->tso_size != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if the packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains a FIN. (already checked by the caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames that sit in the middle of
	 * the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the Nagle rule for urgent data (or for the final FIN).  */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

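/* Illustrative example (assumed state): a 100-byte skb with mss 1460,
 * no TCP_NODELAY and unacknowledged small data outstanding fails the
 * Minshall check above, so tcp_nagle_test() returns 0 and the segment
 * waits; once everything is ACKed it is allowed out.
 */
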
/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

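/* Illustrative example (assumed values): with snd_una = 1000 and
 * snd_wnd = 6000, an skb starting at seq 5500 with cur_mss = 1460
 * tests end_seq 6960 against 7000 and passes; one starting at 6000
 * would test 7460 > 7000 and fail.
 */
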
/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, and put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	buff->truesize = nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up the tso_factor for both the original and the new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From the in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
		/* Different approach: try not to defer past a single
		 * ACK.  The receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer.  */
	return 1;
}

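/* Illustrative example (assumed values, default divisor of 3): with
 * snd_cwnd = 20, mss_cache = 1460 and snd_wnd = 65535, the threshold is
 * min(65535, 29200) / 3 = 9733 bytes; if the current send/cwnd limit is
 * at least that much, we transmit now instead of deferring.
 */
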
9921da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
9931da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
9941da177e4SLinus Torvalds  * window for us.
9951da177e4SLinus Torvalds  *
9961da177e4SLinus Torvalds  * Returns 1, if no segments are in flight and we have queued segments, but
9971da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
9981da177e4SLinus Torvalds  */
999a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
10001da177e4SLinus Torvalds {
10011da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
100292df7b51SDavid S. Miller 	struct sk_buff *skb;
1003c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1004c1b4a7e6SDavid S. Miller 	int cwnd_quota;
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds 	/* If we are closed, the bytes will have to remain here.
10071da177e4SLinus Torvalds 	 * In time closedown will finish, we empty the write queue and all
10081da177e4SLinus Torvalds 	 * will be happy.
10091da177e4SLinus Torvalds 	 */
101092df7b51SDavid S. Miller 	if (unlikely(sk->sk_state == TCP_CLOSE))
101192df7b51SDavid S. Miller 		return 0;
101292df7b51SDavid S. Miller 
1013c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
1014b68e9f85SHerbert Xu 	while ((skb = sk->sk_send_head)) {
1015c8ac3774SHerbert Xu 		unsigned int limit;
1016c8ac3774SHerbert Xu 
1017b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1018c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1019c1b4a7e6SDavid S. Miller 
1020b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1021b68e9f85SHerbert Xu 		if (!cwnd_quota)
1022b68e9f85SHerbert Xu 			break;
1023b68e9f85SHerbert Xu 
1024b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1025b68e9f85SHerbert Xu 			break;
1026b68e9f85SHerbert Xu 
1027c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1028aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1029aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1030aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1031aa93466bSDavid S. Miller 				break;
1032c1b4a7e6SDavid S. Miller 		} else {
1033c1b4a7e6SDavid S. Miller 			if (tcp_tso_should_defer(sk, tp, skb))
1034aa93466bSDavid S. Miller 				break;
1035c1b4a7e6SDavid S. Miller 		}
1036aa93466bSDavid S. Miller 
1037c8ac3774SHerbert Xu 		limit = mss_now;
1038c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1039c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1040c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1041c1b4a7e6SDavid S. Miller 
1042c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1043c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1044c1b4a7e6SDavid S. Miller 
1045c1b4a7e6SDavid S. Miller 				if (trim)
1046c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1047c1b4a7e6SDavid S. Miller 			}
1048c1b4a7e6SDavid S. Miller 		}
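		/* Illustrative numbers: with mss_now = 1460 and skb->len = 5000,
		 * trim = 5000 % 1460 = 620, so limit becomes 4380 and
		 * tso_fragment() below splits off exactly three full-sized
		 * segments, leaving the 620 byte tail queued.
		 */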
1049c8ac3774SHerbert Xu 
1050c8ac3774SHerbert Xu 		if (skb->len > limit &&
1051c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
10521da177e4SLinus Torvalds 			break;
10531da177e4SLinus Torvalds 
10541da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1055c1b4a7e6SDavid S. Miller 
1056*dfb4b9dcSDavid S. Miller 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
10571da177e4SLinus Torvalds 			break;
10581da177e4SLinus Torvalds 
10591da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
10601da177e4SLinus Torvalds 		 * This call will increment packets_out.
10611da177e4SLinus Torvalds 		 */
10621da177e4SLinus Torvalds 		update_send_head(sk, tp, skb);
10631da177e4SLinus Torvalds 
10641da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1065aa93466bSDavid S. Miller 		sent_pkts++;
10661da177e4SLinus Torvalds 	}
10671da177e4SLinus Torvalds 
1068aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
10691da177e4SLinus Torvalds 		tcp_cwnd_validate(sk, tp);
10701da177e4SLinus Torvalds 		return 0;
10711da177e4SLinus Torvalds 	}
10721da177e4SLinus Torvalds 	return !tp->packets_out && sk->sk_send_head;
10731da177e4SLinus Torvalds }
10741da177e4SLinus Torvalds 
1075a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1076a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
1077a762a980SDavid S. Miller  * The socket must be locked by the caller.
1078a762a980SDavid S. Miller  */
1079a762a980SDavid S. Miller void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1080a2e2a59cSDavid S. Miller 			       unsigned int cur_mss, int nonagle)
1081a762a980SDavid S. Miller {
1082a762a980SDavid S. Miller 	struct sk_buff *skb = sk->sk_send_head;
1083a762a980SDavid S. Miller 
1084a762a980SDavid S. Miller 	if (skb) {
108555c97f3eSDavid S. Miller 		if (tcp_write_xmit(sk, cur_mss, nonagle))
1086a762a980SDavid S. Miller 			tcp_check_probe_timer(sk, tp);
1087a762a980SDavid S. Miller 	}
1088a762a980SDavid S. Miller }
1089a762a980SDavid S. Miller 
1090c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. Callers that need the
1091c1b4a7e6SDavid S. Miller  * probe timer etc. set up must use the real __tcp_push_pending_frames().
1092c1b4a7e6SDavid S. Miller  */
1093c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1094c1b4a7e6SDavid S. Miller {
1095c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1096c1b4a7e6SDavid S. Miller 	struct sk_buff *skb = sk->sk_send_head;
1097c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, cwnd_quota;
1098c1b4a7e6SDavid S. Miller 
1099c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1100c1b4a7e6SDavid S. Miller 
1101846998aeSDavid S. Miller 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1102c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1103c1b4a7e6SDavid S. Miller 
1104c1b4a7e6SDavid S. Miller 	if (likely(cwnd_quota)) {
1105c8ac3774SHerbert Xu 		unsigned int limit;
1106c8ac3774SHerbert Xu 
1107c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1108c1b4a7e6SDavid S. Miller 
1109c8ac3774SHerbert Xu 		limit = mss_now;
1110c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1111c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1112c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1113c1b4a7e6SDavid S. Miller 
1114c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1115c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1116c1b4a7e6SDavid S. Miller 
1117c1b4a7e6SDavid S. Miller 				if (trim)
1118c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1119c1b4a7e6SDavid S. Miller 			}
1120c1b4a7e6SDavid S. Miller 		}
1121c8ac3774SHerbert Xu 
1122c8ac3774SHerbert Xu 		if (skb->len > limit &&
1123c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1124c1b4a7e6SDavid S. Miller 			return;
1125c1b4a7e6SDavid S. Miller 
1126c1b4a7e6SDavid S. Miller 		/* Send it out now. */
1127c1b4a7e6SDavid S. Miller 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1128c1b4a7e6SDavid S. Miller 
1129*dfb4b9dcSDavid S. Miller 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1130c1b4a7e6SDavid S. Miller 			update_send_head(sk, tp, skb);
1131c1b4a7e6SDavid S. Miller 			tcp_cwnd_validate(sk, tp);
1132c1b4a7e6SDavid S. Miller 			return;
1133c1b4a7e6SDavid S. Miller 		}
1134c1b4a7e6SDavid S. Miller 	}
1135c1b4a7e6SDavid S. Miller }
1136c1b4a7e6SDavid S. Miller 
11371da177e4SLinus Torvalds /* This function returns the amount that we can raise the
11381da177e4SLinus Torvalds  * usable window based on the following constraints
11391da177e4SLinus Torvalds  *
11401da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
11411da177e4SLinus Torvalds  * 2. We limit memory per socket
11421da177e4SLinus Torvalds  *
11431da177e4SLinus Torvalds  * RFC 1122:
11441da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
11451da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
11461da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
11471da177e4SLinus Torvalds  *
11481da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
11491da177e4SLinus Torvalds  * it at least MSS bytes.
11501da177e4SLinus Torvalds  *
11511da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
11521da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
11531da177e4SLinus Torvalds  *
11541da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
11551da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
11561da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
11571da177e4SLinus Torvalds  * window to always advance by a single byte.
11581da177e4SLinus Torvalds  *
11591da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
11601da177e4SLinus Torvalds  * then this will not be a problem.
11611da177e4SLinus Torvalds  *
11621da177e4SLinus Torvalds  * BSD seems to make the following compromise:
11631da177e4SLinus Torvalds  *
11641da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
11651da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
11661da177e4SLinus Torvalds  *	then set the window to 0.
11671da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
11681da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
11691da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
11701da177e4SLinus Torvalds  *
11711da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
11721da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
11731da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
11741da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
11751da177e4SLinus Torvalds  * because the pipeline is full.
11761da177e4SLinus Torvalds  *
11771da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
11781da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
11791da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
11801da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
11811da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
11821da177e4SLinus Torvalds  *
11831da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
11841da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
11851da177e4SLinus Torvalds  *
11861da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
11871da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
11881da177e4SLinus Torvalds  */
11891da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
11901da177e4SLinus Torvalds {
1191463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
11921da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1193caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
11941da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
11951da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
11961da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
11971da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
11981da177e4SLinus Torvalds 	 */
1199463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
12001da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
12011da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
12021da177e4SLinus Torvalds 	int window;
12031da177e4SLinus Torvalds 
12041da177e4SLinus Torvalds 	if (mss > full_space)
12051da177e4SLinus Torvalds 		mss = full_space;
12061da177e4SLinus Torvalds 
12071da177e4SLinus Torvalds 	if (free_space < full_space/2) {
1208463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds 		if (tcp_memory_pressure)
12111da177e4SLinus Torvalds 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds 		if (free_space < mss)
12141da177e4SLinus Torvalds 			return 0;
12151da177e4SLinus Torvalds 	}
12161da177e4SLinus Torvalds 
12171da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
12181da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
12191da177e4SLinus Torvalds 
12201da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
12211da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
12221da177e4SLinus Torvalds 	 */
12231da177e4SLinus Torvalds 	window = tp->rcv_wnd;
12241da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
12251da177e4SLinus Torvalds 		window = free_space;
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
12281da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
12291da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
12301da177e4SLinus Torvalds 		 */
12311da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
12321da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
12331da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
12341da177e4SLinus Torvalds 	} else {
12351da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
12361da177e4SLinus Torvalds 		 * Window clamp already applied above.
12371da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
12381da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
12391da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
12401da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
12411da177e4SLinus Torvalds 		 * is too small.
12421da177e4SLinus Torvalds 		 */
12431da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
12441da177e4SLinus Torvalds 			window = (free_space/mss)*mss;
12451da177e4SLinus Torvalds 	}
12461da177e4SLinus Torvalds 
12471da177e4SLinus Torvalds 	return window;
12481da177e4SLinus Torvalds }
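
/* Worked examples (illustrative numbers): without window scaling and
 * with mss = 1460, free_space = 10000 is rounded down to
 * (10000 / 1460) * 1460 = 8760, a whole multiple of the MSS.  With
 * rcv_wscale = 2, a window of 1001 would lose its low bits when scaled
 * ((1001 >> 2) << 2 = 1000), so it is rounded up to
 * ((1001 >> 2) + 1) << 2 = 1004 before being advertised.
 */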
12491da177e4SLinus Torvalds 
12501da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */
12511da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
12521da177e4SLinus Torvalds {
12531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12541da177e4SLinus Torvalds 	struct sk_buff *next_skb = skb->next;
12551da177e4SLinus Torvalds 
12561da177e4SLinus Torvalds 	/* The first test we must make is that neither of these two
12571da177e4SLinus Torvalds 	 * SKB's are still referenced by someone else.
12581da177e4SLinus Torvalds 	 */
12591da177e4SLinus Torvalds 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
12601da177e4SLinus Torvalds 		int skb_size = skb->len, next_skb_size = next_skb->len;
12611da177e4SLinus Torvalds 		u16 flags = TCP_SKB_CB(skb)->flags;
12621da177e4SLinus Torvalds 
12631da177e4SLinus Torvalds 		/* Also punt if next skb has been SACK'd. */
12641da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
12651da177e4SLinus Torvalds 			return;
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds 		/* Next skb is out of window. */
12681da177e4SLinus Torvalds 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
12691da177e4SLinus Torvalds 			return;
12701da177e4SLinus Torvalds 
12711da177e4SLinus Torvalds 		/* Punt if not enough space exists in the first SKB for
12721da177e4SLinus Torvalds 		 * the data in the second, or the total combined payload
12731da177e4SLinus Torvalds 		 * would exceed the MSS.
12741da177e4SLinus Torvalds 		 */
12751da177e4SLinus Torvalds 		if ((next_skb_size > skb_tailroom(skb)) ||
12761da177e4SLinus Torvalds 		    ((skb_size + next_skb_size) > mss_now))
12771da177e4SLinus Torvalds 			return;
12781da177e4SLinus Torvalds 
12791da177e4SLinus Torvalds 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
12801da177e4SLinus Torvalds 		       tcp_skb_pcount(next_skb) != 1);
12811da177e4SLinus Torvalds 
12826a438bbeSStephen Hemminger 		/* changing transmit queue under us so clear hints */
12836a438bbeSStephen Hemminger 		clear_all_retrans_hints(tp);
12846a438bbeSStephen Hemminger 
12851da177e4SLinus Torvalds 		/* Ok.	We will be able to collapse the packet. */
12868728b834SDavid S. Miller 		__skb_unlink(next_skb, &sk->sk_write_queue);
12871da177e4SLinus Torvalds 
12881da177e4SLinus Torvalds 		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
12891da177e4SLinus Torvalds 
12901da177e4SLinus Torvalds 		if (next_skb->ip_summed == CHECKSUM_HW)
12911da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_HW;
12921da177e4SLinus Torvalds 
12931da177e4SLinus Torvalds 		if (skb->ip_summed != CHECKSUM_HW)
12941da177e4SLinus Torvalds 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds 		/* Update sequence range on original skb. */
12971da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
12981da177e4SLinus Torvalds 
12991da177e4SLinus Torvalds 		/* Merge over control information. */
13001da177e4SLinus Torvalds 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
13011da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = flags;
13021da177e4SLinus Torvalds 
13031da177e4SLinus Torvalds 		/* All done, get rid of second SKB and account for it so
13041da177e4SLinus Torvalds 		 * packet counting does not break.
13051da177e4SLinus Torvalds 		 */
13061da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
13071da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
13081da177e4SLinus Torvalds 			tp->retrans_out -= tcp_skb_pcount(next_skb);
13091da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
13101da177e4SLinus Torvalds 			tp->lost_out -= tcp_skb_pcount(next_skb);
13111da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
13121da177e4SLinus Torvalds 		}
13131da177e4SLinus Torvalds 		/* Reno case is special. Sigh... */
13141da177e4SLinus Torvalds 		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
13151da177e4SLinus Torvalds 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
13161da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
13171da177e4SLinus Torvalds 		}
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 		/* Not quite right: it can be > snd.fack, but
13201da177e4SLinus Torvalds 		 * it is better to underestimate fackets.
13211da177e4SLinus Torvalds 		 */
13221da177e4SLinus Torvalds 		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
13231da177e4SLinus Torvalds 		tcp_packets_out_dec(tp, next_skb);
13241da177e4SLinus Torvalds 		sk_stream_free_skb(sk, next_skb);
13251da177e4SLinus Torvalds 	}
13261da177e4SLinus Torvalds }
13271da177e4SLinus Torvalds 
13281da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in
13291da177e4SLinus Torvalds  * tcp_timer. This is used for path mtu discovery.
13301da177e4SLinus Torvalds  * The socket is already locked here.
13311da177e4SLinus Torvalds  */
13321da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk)
13331da177e4SLinus Torvalds {
13346687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
13351da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13361da177e4SLinus Torvalds 	struct sk_buff *skb;
13371da177e4SLinus Torvalds 	unsigned int mss = tcp_current_mss(sk, 0);
13381da177e4SLinus Torvalds 	int lost = 0;
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds 	sk_stream_for_retrans_queue(skb, sk) {
13411da177e4SLinus Torvalds 		if (skb->len > mss &&
13421da177e4SLinus Torvalds 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
13431da177e4SLinus Torvalds 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
13441da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
13451da177e4SLinus Torvalds 				tp->retrans_out -= tcp_skb_pcount(skb);
13461da177e4SLinus Torvalds 			}
13471da177e4SLinus Torvalds 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
13481da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
13491da177e4SLinus Torvalds 				tp->lost_out += tcp_skb_pcount(skb);
13501da177e4SLinus Torvalds 				lost = 1;
13511da177e4SLinus Torvalds 			}
13521da177e4SLinus Torvalds 		}
13531da177e4SLinus Torvalds 	}
13541da177e4SLinus Torvalds 
13556a438bbeSStephen Hemminger 	clear_all_retrans_hints(tp);
13566a438bbeSStephen Hemminger 
13571da177e4SLinus Torvalds 	if (!lost)
13581da177e4SLinus Torvalds 		return;
13591da177e4SLinus Torvalds 
13601da177e4SLinus Torvalds 	tcp_sync_left_out(tp);
13611da177e4SLinus Torvalds 
13621da177e4SLinus Torvalds  	/* Don't muck with the congestion window here.
13631da177e4SLinus Torvalds 	 * The reason is that we do not increase the amount of _data_
13641da177e4SLinus Torvalds 	 * in the network, but the units have changed and the effective
13651da177e4SLinus Torvalds 	 * cwnd/ssthresh are really reduced now.
13661da177e4SLinus Torvalds 	 */
13676687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
13681da177e4SLinus Torvalds 		tp->high_seq = tp->snd_nxt;
13696687e988SArnaldo Carvalho de Melo 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
13701da177e4SLinus Torvalds 		tp->prior_ssthresh = 0;
13711da177e4SLinus Torvalds 		tp->undo_marker = 0;
13726687e988SArnaldo Carvalho de Melo 		tcp_set_ca_state(sk, TCP_CA_Loss);
13731da177e4SLinus Torvalds 	}
13741da177e4SLinus Torvalds 	tcp_xmit_retransmit_queue(sk);
13751da177e4SLinus Torvalds }
13761da177e4SLinus Torvalds 
13771da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
13781da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
13791da177e4SLinus Torvalds  * error occurred which prevented the send.
13801da177e4SLinus Torvalds  */
13811da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
13821da177e4SLinus Torvalds {
13831da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13841da177e4SLinus Torvalds  	unsigned int cur_mss = tcp_current_mss(sk, 0);
13851da177e4SLinus Torvalds 	int err;
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
1388caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
13891da177e4SLinus Torvalds 	 */
13901da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
13911da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
13921da177e4SLinus Torvalds 		return -EAGAIN;
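	/* Illustrative numbers: with sk_wmem_queued = 100000 and a
	 * 200000 byte sk_sndbuf, the retransmit is refused with -EAGAIN
	 * once more than 100000 + 25000 = 125000 bytes are allocated.
	 */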
13931da177e4SLinus Torvalds 
13941da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
13951da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
13961da177e4SLinus Torvalds 			BUG();
13971da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
13981da177e4SLinus Torvalds 			return -ENOMEM;
13991da177e4SLinus Torvalds 	}
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
14021da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
14031da177e4SLinus Torvalds 	 * case when the window is shrunk to zero. In this case
14041da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
14051da177e4SLinus Torvalds 	 */
14061da177e4SLinus Torvalds 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
14071da177e4SLinus Torvalds 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
14081da177e4SLinus Torvalds 		return -EAGAIN;
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
1411846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
14121da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
14131da177e4SLinus Torvalds 	}
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	/* Collapse two adjacent packets if worthwhile and we can. */
14161da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
14171da177e4SLinus Torvalds 	   (skb->len < (cur_mss >> 1)) &&
14181da177e4SLinus Torvalds 	   (skb->next != sk->sk_send_head) &&
14191da177e4SLinus Torvalds 	   (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
14201da177e4SLinus Torvalds 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
14211da177e4SLinus Torvalds 	   (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
14221da177e4SLinus Torvalds 	   (sysctl_tcp_retrans_collapse != 0))
14231da177e4SLinus Torvalds 		tcp_retrans_try_collapse(sk, skb, cur_mss);
14241da177e4SLinus Torvalds 
14251da177e4SLinus Torvalds 	if (tp->af_specific->rebuild_header(sk))
14261da177e4SLinus Torvalds 		return -EHOSTUNREACH; /* Routing failure or similar. */
14271da177e4SLinus Torvalds 
14281da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
14291da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
14301da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
14311da177e4SLinus Torvalds 	 */
14321da177e4SLinus Torvalds 	if (skb->len > 0 &&
14331da177e4SLinus Torvalds 	   (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
14341da177e4SLinus Torvalds 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
14351da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
14361da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
14371da177e4SLinus Torvalds 			skb_shinfo(skb)->tso_segs = 1;
14381da177e4SLinus Torvalds 			skb_shinfo(skb)->tso_size = 0;
14391da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
14401da177e4SLinus Torvalds 			skb->csum = 0;
14411da177e4SLinus Torvalds 		}
14421da177e4SLinus Torvalds 	}
14431da177e4SLinus Torvalds 
14441da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
14451da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
14461da177e4SLinus Torvalds 	 */
14471da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
14481da177e4SLinus Torvalds 
1449*dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
14501da177e4SLinus Torvalds 
14511da177e4SLinus Torvalds 	if (err == 0) {
14521da177e4SLinus Torvalds 		/* Update global TCP statistics. */
14531da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
14541da177e4SLinus Torvalds 
14551da177e4SLinus Torvalds 		tp->total_retrans++;
14561da177e4SLinus Torvalds 
14571da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
14581da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
14591da177e4SLinus Torvalds 			if (net_ratelimit())
14601da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
14611da177e4SLinus Torvalds 		}
14621da177e4SLinus Torvalds #endif
14631da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
14641da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
14651da177e4SLinus Torvalds 
14661da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
14671da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
14681da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
14691da177e4SLinus Torvalds 
14701da177e4SLinus Torvalds 		tp->undo_retrans++;
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
14731da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
14741da177e4SLinus Torvalds 		 */
14751da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
14761da177e4SLinus Torvalds 	}
14771da177e4SLinus Torvalds 	return err;
14781da177e4SLinus Torvalds }
14791da177e4SLinus Torvalds 
14801da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
14811da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
14821da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
14831da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
14841da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
14851da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
14861da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
14871da177e4SLinus Torvalds  */
14881da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
14891da177e4SLinus Torvalds {
14906687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
14911da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
14921da177e4SLinus Torvalds 	struct sk_buff *skb;
14936a438bbeSStephen Hemminger 	int packet_cnt;
14946a438bbeSStephen Hemminger 
14956a438bbeSStephen Hemminger 	if (tp->retransmit_skb_hint) {
14966a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
14976a438bbeSStephen Hemminger 		packet_cnt = tp->retransmit_cnt_hint;
14986a438bbeSStephen Hemminger 	} else {
14996a438bbeSStephen Hemminger 		skb = sk->sk_write_queue.next;
15006a438bbeSStephen Hemminger 		packet_cnt = 0;
15016a438bbeSStephen Hemminger 	}
15021da177e4SLinus Torvalds 
15031da177e4SLinus Torvalds 	/* First pass: retransmit lost packets. */
15046a438bbeSStephen Hemminger 	if (tp->lost_out) {
15056a438bbeSStephen Hemminger 		sk_stream_for_retrans_queue_from(skb, sk) {
15061da177e4SLinus Torvalds 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
15071da177e4SLinus Torvalds 
15086a438bbeSStephen Hemminger 			/* we could do better than to assign each time */
15096a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
15106a438bbeSStephen Hemminger 			tp->retransmit_cnt_hint = packet_cnt;
15116a438bbeSStephen Hemminger 
15121da177e4SLinus Torvalds 			/* Assume this retransmit will generate
15131da177e4SLinus Torvalds 			 * only one packet for congestion window
15141da177e4SLinus Torvalds 			 * calculation purposes.  This works because
15151da177e4SLinus Torvalds 			 * tcp_retransmit_skb() will chop up the
15161da177e4SLinus Torvalds 			 * packet to be MSS sized and all the
15171da177e4SLinus Torvalds 			 * packet counting works out.
15181da177e4SLinus Torvalds 			 */
15191da177e4SLinus Torvalds 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
15201da177e4SLinus Torvalds 				return;
15211da177e4SLinus Torvalds 
15221da177e4SLinus Torvalds 			if (sacked & TCPCB_LOST) {
15231da177e4SLinus Torvalds 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
15246a438bbeSStephen Hemminger 					if (tcp_retransmit_skb(sk, skb)) {
15256a438bbeSStephen Hemminger 						tp->retransmit_skb_hint = NULL;
15261da177e4SLinus Torvalds 						return;
15276a438bbeSStephen Hemminger 					}
15286687e988SArnaldo Carvalho de Melo 					if (icsk->icsk_ca_state != TCP_CA_Loss)
15291da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
15301da177e4SLinus Torvalds 					else
15311da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds 					if (skb ==
15341da177e4SLinus Torvalds 					    skb_peek(&sk->sk_write_queue))
1535463c84b9SArnaldo Carvalho de Melo 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
15363f421baaSArnaldo Carvalho de Melo 									  inet_csk(sk)->icsk_rto,
15373f421baaSArnaldo Carvalho de Melo 									  TCP_RTO_MAX);
15381da177e4SLinus Torvalds 				}
15391da177e4SLinus Torvalds 
15406a438bbeSStephen Hemminger 				packet_cnt += tcp_skb_pcount(skb);
15416a438bbeSStephen Hemminger 				if (packet_cnt >= tp->lost_out)
15421da177e4SLinus Torvalds 					break;
15431da177e4SLinus Torvalds 			}
15441da177e4SLinus Torvalds 		}
15451da177e4SLinus Torvalds 	}
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 	/* OK, demanded retransmission is finished. */
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds 	/* Forward retransmissions are possible only during Recovery. */
15506687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
15511da177e4SLinus Torvalds 		return;
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds 	/* No forward retransmissions in Reno are possible. */
15541da177e4SLinus Torvalds 	if (!tp->rx_opt.sack_ok)
15551da177e4SLinus Torvalds 		return;
15561da177e4SLinus Torvalds 
15571da177e4SLinus Torvalds 	/* Yeah, we have to make a difficult choice between forward transmission
15581da177e4SLinus Torvalds 	 * and retransmission... Both ways have their merits...
15591da177e4SLinus Torvalds 	 *
15601da177e4SLinus Torvalds 	 * For now we do not retransmit anything, while we have some new
15611da177e4SLinus Torvalds 	 * segments to send.
15621da177e4SLinus Torvalds 	 */
15631da177e4SLinus Torvalds 
15641da177e4SLinus Torvalds 	if (tcp_may_send_now(sk, tp))
15651da177e4SLinus Torvalds 		return;
15661da177e4SLinus Torvalds 
15676a438bbeSStephen Hemminger 	if (tp->forward_skb_hint) {
15686a438bbeSStephen Hemminger 		skb = tp->forward_skb_hint;
15696a438bbeSStephen Hemminger 		packet_cnt = tp->forward_cnt_hint;
15706a438bbeSStephen Hemminger 	} else {
15716a438bbeSStephen Hemminger 		skb = sk->sk_write_queue.next;
15721da177e4SLinus Torvalds 		packet_cnt = 0;
15736a438bbeSStephen Hemminger 	}
15741da177e4SLinus Torvalds 
15756a438bbeSStephen Hemminger 	sk_stream_for_retrans_queue_from(skb, sk) {
15766a438bbeSStephen Hemminger 		tp->forward_cnt_hint = packet_cnt;
15776a438bbeSStephen Hemminger 		tp->forward_skb_hint = skb;
15786a438bbeSStephen Hemminger 
15791da177e4SLinus Torvalds 		/* Similar to the retransmit loop above we
15801da177e4SLinus Torvalds 		 * can pretend that the retransmitted SKB
15811da177e4SLinus Torvalds 		 * we send out here will be composed of one
15821da177e4SLinus Torvalds 		 * real MSS sized packet because tcp_retransmit_skb()
15831da177e4SLinus Torvalds 		 * will fragment it if necessary.
15841da177e4SLinus Torvalds 		 */
15851da177e4SLinus Torvalds 		if (++packet_cnt > tp->fackets_out)
15861da177e4SLinus Torvalds 			break;
15871da177e4SLinus Torvalds 
15881da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
15891da177e4SLinus Torvalds 			break;
15901da177e4SLinus Torvalds 
15911da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
15921da177e4SLinus Torvalds 			continue;
15931da177e4SLinus Torvalds 
15941da177e4SLinus Torvalds 		/* Ok, retransmit it. */
15956a438bbeSStephen Hemminger 		if (tcp_retransmit_skb(sk, skb)) {
15966a438bbeSStephen Hemminger 			tp->forward_skb_hint = NULL;
15971da177e4SLinus Torvalds 			break;
15986a438bbeSStephen Hemminger 		}
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds 		if (skb == skb_peek(&sk->sk_write_queue))
16013f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
16023f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
16033f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
16041da177e4SLinus Torvalds 
16051da177e4SLinus Torvalds 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
16061da177e4SLinus Torvalds 	}
16071da177e4SLinus Torvalds }
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds 
16101da177e4SLinus Torvalds /* Send a fin.  The caller locks the socket for us.  This cannot be
16111da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
16121da177e4SLinus Torvalds  */
16131da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
16141da177e4SLinus Torvalds {
16151da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16161da177e4SLinus Torvalds 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
16171da177e4SLinus Torvalds 	int mss_now;
16181da177e4SLinus Torvalds 
16191da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
16201da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
16211da177e4SLinus Torvalds 	 * and IP options.
16221da177e4SLinus Torvalds 	 */
16231da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, 1);
16241da177e4SLinus Torvalds 
16251da177e4SLinus Torvalds 	if (sk->sk_send_head != NULL) {
16261da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
16271da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
16281da177e4SLinus Torvalds 		tp->write_seq++;
16291da177e4SLinus Torvalds 	} else {
16301da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
16311da177e4SLinus Torvalds 		for (;;) {
1632d179cd12SDavid S. Miller 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
16331da177e4SLinus Torvalds 			if (skb)
16341da177e4SLinus Torvalds 				break;
16351da177e4SLinus Torvalds 			yield();
16361da177e4SLinus Torvalds 		}
16371da177e4SLinus Torvalds 
16381da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
16391da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
16401da177e4SLinus Torvalds 		skb->csum = 0;
16411da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
16421da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked = 0;
16431da177e4SLinus Torvalds 		skb_shinfo(skb)->tso_segs = 1;
16441da177e4SLinus Torvalds 		skb_shinfo(skb)->tso_size = 0;
16451da177e4SLinus Torvalds 
16461da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
16471da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->seq = tp->write_seq;
16481da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
16491da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
16501da177e4SLinus Torvalds 	}
16511da177e4SLinus Torvalds 	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
16521da177e4SLinus Torvalds }
16531da177e4SLinus Torvalds 
16541da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
16551da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
16561da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
16571da177e4SLinus Torvalds  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
16581da177e4SLinus Torvalds  */
1659dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
16601da177e4SLinus Torvalds {
16611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16621da177e4SLinus Torvalds 	struct sk_buff *skb;
16631da177e4SLinus Torvalds 
16641da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
16651da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
16661da177e4SLinus Torvalds 	if (!skb) {
16671da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
16681da177e4SLinus Torvalds 		return;
16691da177e4SLinus Torvalds 	}
16701da177e4SLinus Torvalds 
16711da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
16721da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
16731da177e4SLinus Torvalds 	skb->csum = 0;
16741da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
16751da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
16761da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
16771da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
16781da177e4SLinus Torvalds 
16791da177e4SLinus Torvalds 	/* Send it off. */
16801da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
16811da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
16821da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1683*dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
16841da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
16851da177e4SLinus Torvalds }
16861da177e4SLinus Torvalds 
16871da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent
16881da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
16891da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
16901da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
16911da177e4SLinus Torvalds  */
16921da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
16931da177e4SLinus Torvalds {
16941da177e4SLinus Torvalds 	struct sk_buff* skb;
16951da177e4SLinus Torvalds 
16961da177e4SLinus Torvalds 	skb = skb_peek(&sk->sk_write_queue);
16971da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
16981da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
16991da177e4SLinus Torvalds 		return -EFAULT;
17001da177e4SLinus Torvalds 	}
17011da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
17021da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
17031da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
17041da177e4SLinus Torvalds 			if (nskb == NULL)
17051da177e4SLinus Torvalds 				return -ENOMEM;
17061da177e4SLinus Torvalds 			__skb_unlink(skb, &sk->sk_write_queue);
17071da177e4SLinus Torvalds 			skb_header_release(nskb);
17081da177e4SLinus Torvalds 			__skb_queue_head(&sk->sk_write_queue, nskb);
17091da177e4SLinus Torvalds 			sk_stream_free_skb(sk, skb);
17101da177e4SLinus Torvalds 			sk_charge_skb(sk, nskb);
17111da177e4SLinus Torvalds 			skb = nskb;
17121da177e4SLinus Torvalds 		}
17131da177e4SLinus Torvalds 
17141da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
17151da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
17161da177e4SLinus Torvalds 	}
17171da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1718*dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
17191da177e4SLinus Torvalds }
17201da177e4SLinus Torvalds 
17211da177e4SLinus Torvalds /*
17221da177e4SLinus Torvalds  * Prepare a SYN-ACK.
17231da177e4SLinus Torvalds  */
17241da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
172560236fddSArnaldo Carvalho de Melo 				 struct request_sock *req)
17261da177e4SLinus Torvalds {
17272e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
17281da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17291da177e4SLinus Torvalds 	struct tcphdr *th;
17301da177e4SLinus Torvalds 	int tcp_header_size;
17311da177e4SLinus Torvalds 	struct sk_buff *skb;
17321da177e4SLinus Torvalds 
17331da177e4SLinus Torvalds 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
17341da177e4SLinus Torvalds 	if (skb == NULL)
17351da177e4SLinus Torvalds 		return NULL;
17361da177e4SLinus Torvalds 
17371da177e4SLinus Torvalds 	/* Reserve space for headers. */
17381da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
17391da177e4SLinus Torvalds 
17401da177e4SLinus Torvalds 	skb->dst = dst_clone(dst);
17411da177e4SLinus Torvalds 
17421da177e4SLinus Torvalds 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
17432e6599cbSArnaldo Carvalho de Melo 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
17442e6599cbSArnaldo Carvalho de Melo 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
17451da177e4SLinus Torvalds 			   /* SACK_PERM is in the place of NOP NOP of TS */
17462e6599cbSArnaldo Carvalho de Melo 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
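	/* Illustrative sizing: with timestamps, window scaling and SACK all
	 * negotiated this comes to 20 + 4 (MSS) + 12 (aligned TSTAMP, whose
	 * NOP slots also carry SACK_PERM) + 4 (aligned WSCALE) = 40 bytes.
	 */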
17471da177e4SLinus Torvalds 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
17481da177e4SLinus Torvalds 
17491da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
17501da177e4SLinus Torvalds 	th->syn = 1;
17511da177e4SLinus Torvalds 	th->ack = 1;
17521da177e4SLinus Torvalds 	if (dst->dev->features&NETIF_F_TSO)
17532e6599cbSArnaldo Carvalho de Melo 		ireq->ecn_ok = 0;
17541da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
17551da177e4SLinus Torvalds 	th->source = inet_sk(sk)->sport;
17562e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
17572e6599cbSArnaldo Carvalho de Melo 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
17581da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
17591da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
17601da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
17611da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
17621da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
17632e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
17641da177e4SLinus Torvalds 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
17651da177e4SLinus Torvalds 		__u8 rcv_wscale;
17661da177e4SLinus Torvalds 		/* Set this up on the first call only */
17671da177e4SLinus Torvalds 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
17681da177e4SLinus Torvalds 		/* tcp_full_space because it is guaranteed to be the first packet */
17691da177e4SLinus Torvalds 		tcp_select_initial_window(tcp_full_space(sk),
17702e6599cbSArnaldo Carvalho de Melo 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
17711da177e4SLinus Torvalds 			&req->rcv_wnd,
17721da177e4SLinus Torvalds 			&req->window_clamp,
17732e6599cbSArnaldo Carvalho de Melo 			ireq->wscale_ok,
17741da177e4SLinus Torvalds 			&rcv_wscale);
17752e6599cbSArnaldo Carvalho de Melo 		ireq->rcv_wscale = rcv_wscale;
17761da177e4SLinus Torvalds 	}
17771da177e4SLinus Torvalds 
17781da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
17791da177e4SLinus Torvalds 	th->window = htons(req->rcv_wnd);
17801da177e4SLinus Torvalds 
17811da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
17822e6599cbSArnaldo Carvalho de Melo 	tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
17832e6599cbSArnaldo Carvalho de Melo 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
17841da177e4SLinus Torvalds 			      TCP_SKB_CB(skb)->when,
17851da177e4SLinus Torvalds 			      req->ts_recent);
17861da177e4SLinus Torvalds 
17871da177e4SLinus Torvalds 	skb->csum = 0;
17881da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
17891da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
17901da177e4SLinus Torvalds 	return skb;
17911da177e4SLinus Torvalds }
17921da177e4SLinus Torvalds 
17931da177e4SLinus Torvalds /*
17941da177e4SLinus Torvalds  * Do all connect socket setups that can be done AF independent.
17951da177e4SLinus Torvalds  */
17961da177e4SLinus Torvalds static inline void tcp_connect_init(struct sock *sk)
17971da177e4SLinus Torvalds {
17981da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
17991da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18001da177e4SLinus Torvalds 	__u8 rcv_wscale;
18011da177e4SLinus Torvalds 
18021da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
18031da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
18041da177e4SLinus Torvalds 	 */
18051da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
18061da177e4SLinus Torvalds 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
18071da177e4SLinus Torvalds 
18081da177e4SLinus Torvalds 	/* If user gave his TCP_MAXSEG, record it to clamp */
18091da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
18101da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
18111da177e4SLinus Torvalds 	tp->max_window = 0;
18121da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
18131da177e4SLinus Torvalds 
18141da177e4SLinus Torvalds 	if (!tp->window_clamp)
18151da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
18161da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
18171da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
18181da177e4SLinus Torvalds 
18191da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
18201da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
18211da177e4SLinus Torvalds 				  &tp->rcv_wnd,
18221da177e4SLinus Torvalds 				  &tp->window_clamp,
18231da177e4SLinus Torvalds 				  sysctl_tcp_window_scaling,
18241da177e4SLinus Torvalds 				  &rcv_wscale);
18251da177e4SLinus Torvalds 
18261da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
18271da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
18281da177e4SLinus Torvalds 
18291da177e4SLinus Torvalds 	sk->sk_err = 0;
18301da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
18311da177e4SLinus Torvalds 	tp->snd_wnd = 0;
18321da177e4SLinus Torvalds 	tcp_init_wl(tp, tp->write_seq, 0);
18331da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
18341da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
18351da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
18361da177e4SLinus Torvalds 	tp->rcv_wup = 0;
18371da177e4SLinus Torvalds 	tp->copied_seq = 0;
18381da177e4SLinus Torvalds 
1839463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
1840463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
18411da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
18421da177e4SLinus Torvalds }
18431da177e4SLinus Torvalds 
18441da177e4SLinus Torvalds /*
18451da177e4SLinus Torvalds  * Build a SYN and send it off.
18461da177e4SLinus Torvalds  */
18471da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
18481da177e4SLinus Torvalds {
18491da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18501da177e4SLinus Torvalds 	struct sk_buff *buff;
18511da177e4SLinus Torvalds 
18521da177e4SLinus Torvalds 	tcp_connect_init(sk);
18531da177e4SLinus Torvalds 
1854d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
18551da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
18561da177e4SLinus Torvalds 		return -ENOBUFS;
18571da177e4SLinus Torvalds 
18581da177e4SLinus Torvalds 	/* Reserve space for headers. */
18591da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
18601da177e4SLinus Torvalds 
18611da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
18621da177e4SLinus Torvalds 	TCP_ECN_send_syn(sk, tp, buff);
18631da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->sacked = 0;
18641da177e4SLinus Torvalds 	skb_shinfo(buff)->tso_segs = 1;
18651da177e4SLinus Torvalds 	skb_shinfo(buff)->tso_size = 0;
18661da177e4SLinus Torvalds 	buff->csum = 0;
18671da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
18681da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
18691da177e4SLinus Torvalds 	tp->snd_nxt = tp->write_seq;
18701da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
18711da177e4SLinus Torvalds 
18721da177e4SLinus Torvalds 	/* Send it off. */
18731da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
18741da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
18751da177e4SLinus Torvalds 	skb_header_release(buff);
18761da177e4SLinus Torvalds 	__skb_queue_tail(&sk->sk_write_queue, buff);
18771da177e4SLinus Torvalds 	sk_charge_skb(sk, buff);
18781da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
1879*dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
18801da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
18811da177e4SLinus Torvalds 
18821da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
18833f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
18843f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
18851da177e4SLinus Torvalds 	return 0;
18861da177e4SLinus Torvalds }
18871da177e4SLinus Torvalds 
18881da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking
18891da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
18901da177e4SLinus Torvalds  * for details.
18911da177e4SLinus Torvalds  */
18921da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
18931da177e4SLinus Torvalds {
1894463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
1895463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
18961da177e4SLinus Torvalds 	unsigned long timeout;
18971da177e4SLinus Torvalds 
18981da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
1899463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
19001da177e4SLinus Torvalds 		int max_ato = HZ/2;
19011da177e4SLinus Torvalds 
1902463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
19031da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
19041da177e4SLinus Torvalds 
19051da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
1908463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
19091da177e4SLinus Torvalds 		 * directly.
19101da177e4SLinus Torvalds 		 */
19111da177e4SLinus Torvalds 		if (tp->srtt) {
19121da177e4SLinus Torvalds 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
19131da177e4SLinus Torvalds 
19141da177e4SLinus Torvalds 			if (rtt < max_ato)
19151da177e4SLinus Torvalds 				max_ato = rtt;
19161da177e4SLinus Torvalds 		}
19171da177e4SLinus Torvalds 
19181da177e4SLinus Torvalds 		ato = min(ato, max_ato);
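		/* Illustrative numbers: with icsk_ack.ato = 200ms and a
		 * measured rtt (srtt >> 3) of 40ms, max_ato drops to 40ms,
		 * so ato = min(200ms, 40ms) bounds the delayed ACK by
		 * roughly one round-trip time.
		 */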
19191da177e4SLinus Torvalds 	}
19201da177e4SLinus Torvalds 
19211da177e4SLinus Torvalds 	/* Stay within the limit we were given */
19221da177e4SLinus Torvalds 	timeout = jiffies + ato;
19231da177e4SLinus Torvalds 
19241da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one already. */
1925463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
19261da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
19271da177e4SLinus Torvalds 		 * send ACK now.
19281da177e4SLinus Torvalds 		 */
1929463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
1930463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
19311da177e4SLinus Torvalds 			tcp_send_ack(sk);
19321da177e4SLinus Torvalds 			return;
19331da177e4SLinus Torvalds 		}
19341da177e4SLinus Torvalds 
1935463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
1936463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
19371da177e4SLinus Torvalds 	}
1938463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
1939463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
1940463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
19411da177e4SLinus Torvalds }
19421da177e4SLinus Torvalds 
19431da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
19441da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
19451da177e4SLinus Torvalds {
19461da177e4SLinus Torvalds 	/* If we have been reset, we may not send again. */
19471da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
19481da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
19491da177e4SLinus Torvalds 		struct sk_buff *buff;
19501da177e4SLinus Torvalds 
19511da177e4SLinus Torvalds 		/* We are not putting this on the write queue, so
19521da177e4SLinus Torvalds 		 * tcp_transmit_skb() will set the ownership to this
19531da177e4SLinus Torvalds 		 * sock.
19541da177e4SLinus Torvalds 		 */
19551da177e4SLinus Torvalds 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
19561da177e4SLinus Torvalds 		if (buff == NULL) {
1957463c84b9SArnaldo Carvalho de Melo 			inet_csk_schedule_ack(sk);
1958463c84b9SArnaldo Carvalho de Melo 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
19593f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
19603f421baaSArnaldo Carvalho de Melo 						  TCP_DELACK_MAX, TCP_RTO_MAX);
19611da177e4SLinus Torvalds 			return;
19621da177e4SLinus Torvalds 		}
19631da177e4SLinus Torvalds 
19641da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
19651da177e4SLinus Torvalds 		skb_reserve(buff, MAX_TCP_HEADER);
19661da177e4SLinus Torvalds 		buff->csum = 0;
19671da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
19681da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->sacked = 0;
19691da177e4SLinus Torvalds 		skb_shinfo(buff)->tso_segs = 1;
19701da177e4SLinus Torvalds 		skb_shinfo(buff)->tso_size = 0;
19711da177e4SLinus Torvalds 
19721da177e4SLinus Torvalds 		/* Send it off, this clears delayed acks for us. */
19731da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
19741da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
1975*dfb4b9dcSDavid S. Miller 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
19761da177e4SLinus Torvalds 	}
19771da177e4SLinus Torvalds }
19781da177e4SLinus Torvalds 
19791da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
19801da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
19811da177e4SLinus Torvalds  *
19821da177e4SLinus Torvalds  * Question: what should we do while in urgent mode?
19831da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
19841da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
19851da177e4SLinus Torvalds  *
19861da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
19871da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
19881da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
19891da177e4SLinus Torvalds  */
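/* A worked example of the probe sequence numbers (illustrative values
 * only): with SND.UNA = 1000 and urgent mode on, the two probes are
 *
 *	SEG.SEQ = 1000 (SND.UNA),   LEN = 0  -> carries the urgent pointer
 *	SEG.SEQ =  999 (SND.UNA-1), LEN = 0  -> stale seq, forces an ACK
 *
 * matching the seq assignment in tcp_xmit_probe_skb() below.
 */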
19901da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
19911da177e4SLinus Torvalds {
19921da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
19931da177e4SLinus Torvalds 	struct sk_buff *skb;
19941da177e4SLinus Torvalds 
19951da177e4SLinus Torvalds 	/* We don't queue it; tcp_transmit_skb() sets ownership. */
19961da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
19971da177e4SLinus Torvalds 	if (skb == NULL)
19981da177e4SLinus Torvalds 		return -1;
19991da177e4SLinus Torvalds 
20001da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
20011da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
20021da177e4SLinus Torvalds 	skb->csum = 0;
20031da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
20041da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = urgent;
20051da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
20061da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
20071da177e4SLinus Torvalds 
20081da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
20091da177e4SLinus Torvalds 	 * end to send an ACK.  Don't queue or clone the SKB;
20101da177e4SLinus Torvalds 	 * just send it.
20111da177e4SLinus Torvalds 	 */
20121da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
20131da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
20141da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2015*dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
20161da177e4SLinus Torvalds }
20171da177e4SLinus Torvalds 
20181da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
20191da177e4SLinus Torvalds {
20201da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
20211da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
20221da177e4SLinus Torvalds 		struct sk_buff *skb;
20231da177e4SLinus Torvalds 
20241da177e4SLinus Torvalds 		if ((skb = sk->sk_send_head) != NULL &&
20251da177e4SLinus Torvalds 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
20261da177e4SLinus Torvalds 			int err;
20271da177e4SLinus Torvalds 			unsigned int mss = tcp_current_mss(sk, 0);
20281da177e4SLinus Torvalds 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
20291da177e4SLinus Torvalds 
20301da177e4SLinus Torvalds 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
20311da177e4SLinus Torvalds 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
20321da177e4SLinus Torvalds 
20331da177e4SLinus Torvalds 			/* We are probing the opening of a window,
20341da177e4SLinus Torvalds 			 * but the window size is != 0; this must be
20351da177e4SLinus Torvalds 			 * the result of sender-side SWS avoidance.
20361da177e4SLinus Torvalds 			 */
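			/* Illustrative numbers: if snd_una = 1000,
			 * snd_wnd = 300 and skb covers 1000..1500
			 * (len 500 > seg_size 300), we fragment at
			 * min(seg_size, mss) so the probe segment
			 * fits inside the offered window.
			 */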
20371da177e4SLinus Torvalds 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
20381da177e4SLinus Torvalds 			    skb->len > mss) {
20391da177e4SLinus Torvalds 				seg_size = min(seg_size, mss);
20401da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2041846998aeSDavid S. Miller 				if (tcp_fragment(sk, skb, seg_size, mss))
20421da177e4SLinus Torvalds 					return -1;
20431da177e4SLinus Torvalds 			} else if (!tcp_skb_pcount(skb))
2044846998aeSDavid S. Miller 				tcp_set_skb_tso_segs(sk, skb, mss);
20451da177e4SLinus Torvalds 
20461da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
20471da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2048*dfb4b9dcSDavid S. Miller 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
20491da177e4SLinus Torvalds 			if (!err) {
20501da177e4SLinus Torvalds 				update_send_head(sk, tp, skb);
20511da177e4SLinus Torvalds 			}
20521da177e4SLinus Torvalds 			return err;
20531da177e4SLinus Torvalds 		} else {
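			/* Nothing sendable in the window: probe instead.
			 * If the urgent pointer sits within 64K ahead of
			 * SND.UNA, also send an urgent-mode probe carrying
			 * SEG.SEQ = SND.UNA.
			 */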
20541da177e4SLinus Torvalds 			if (tp->urg_mode &&
20551da177e4SLinus Torvalds 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
20561da177e4SLinus Torvalds 				tcp_xmit_probe_skb(sk, TCPCB_URG);
20571da177e4SLinus Torvalds 			return tcp_xmit_probe_skb(sk, 0);
20581da177e4SLinus Torvalds 		}
20591da177e4SLinus Torvalds 	}
20601da177e4SLinus Torvalds 	return -1;
20611da177e4SLinus Torvalds }
20621da177e4SLinus Torvalds 
20631da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
20641da177e4SLinus Torvalds  * send a partial packet; otherwise send a zero-window probe.
20651da177e4SLinus Torvalds  */
20661da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
20671da177e4SLinus Torvalds {
2068463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
20691da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20701da177e4SLinus Torvalds 	int err;
20711da177e4SLinus Torvalds 
20721da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
20731da177e4SLinus Torvalds 
20741da177e4SLinus Torvalds 	if (tp->packets_out || !sk->sk_send_head) {
20751da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
20766687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2077463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
20781da177e4SLinus Torvalds 		return;
20791da177e4SLinus Torvalds 	}
20801da177e4SLinus Torvalds 
20811da177e4SLinus Torvalds 	if (err <= 0) {
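		/* err <= 0: the probe was sent (or failed outright, e.g.
		 * allocation failure).  Count it and back off exponentially,
		 * capping the retry interval at TCP_RTO_MAX.
		 */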
2082463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2083463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
20846687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2085463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
20863f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
20873f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
20881da177e4SLinus Torvalds 	} else {
20891da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
20906687e988SArnaldo Carvalho de Melo 		 * do not back off and do not bump icsk_probes_out.
20911da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
20921da177e4SLinus Torvalds 		 *
20931da177e4SLinus Torvalds 		 * But still use the accumulated backoff.
20941da177e4SLinus Torvalds 		 */
20956687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
20966687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2097463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2098463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
20993f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
21003f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
21011da177e4SLinus Torvalds 	}
21021da177e4SLinus Torvalds }
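/* A sketch of the resulting probe schedule (illustrative, assuming
 * icsk_rto starts at 200 ms): successive probes fire after roughly
 * 200 ms, 400 ms, 800 ms, ... as icsk_backoff grows, with each
 * interval capped at TCP_RTO_MAX, until the peer's window opens or
 * the timer code gives up on the connection.
 */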
21031da177e4SLinus Torvalds 
21041da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
21051da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
21061da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
21071da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2108f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
2109