/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

int sysctl_tcp_mtu_probing = 0;
int sysctl_tcp_base_mss = 512;

EXPORT_SYMBOL(sysctl_tcp_mtu_probing);
EXPORT_SYMBOL(sysctl_tcp_base_mss);

static void update_send_head(struct sock *sk, struct tcp_sock *tp,
			     struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if the window was not shrunk.
 * If the window has been shrunk, what should we send? It is not clear
 * at all.  Using SND.UNA we will fail to open the window, and SND.NXT
 * is out of the window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
 * invalid. OK, let's do this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}
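
/* For illustration (assumed values, not taken from this file): if the
 * socket starts with tp->advmss = 1460 but the cached route carries an
 * RTAX_ADVMSS metric of 536, the branch above clamps the advertisement:
 *
 *	mss = dst_metric(dst, RTAX_ADVMSS);	// 536 < 1460
 *	tp->advmss = mss;
 *	return 536;			// ends up in the SYN's MSS option
 */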

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window".  This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
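
/* Worked example for the halving loop above (assumed numbers): with
 * snd_cwnd = 32, restart_cwnd = 4 and an idle time of 3.5 * RTO, the
 * loop runs three times before delta goes non-positive:
 *
 *	delta = 2.5*RTO  ->  cwnd = 16
 *	delta = 1.5*RTO  ->  cwnd = 8
 *	delta = 0.5*RTO  ->  cwnd = 4	(== restart_cwnd, loop stops)
 *
 * so snd_cwnd = max(4, 4) = 4: roughly one halving per RTO of idle
 * time, never below the restart window.
 */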

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value large enough for senders
	 * following RFC2414. Senders not following this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
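
/* Worked example (assumed sizes): if max(sysctl_tcp_rmem[2],
 * sysctl_rmem_max) is 262144, the loop above needs three shifts to
 * bring it below 64K, so *rcv_wscale becomes 3:
 *
 *	262144 -> 131072 -> 65536 -> 32768	(65536 is still > 65535)
 *
 * With mss = 1460, the RFC2414 branch then caps the initial *rcv_wnd
 * at init_cwnd * mss = 4 * 1460 = 5840 bytes.
 */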

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed, make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets (should timestamps be used) must be counted in
	 * the MSS advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}
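
/* Example of the 32-bit option words built above (assumed mss = 1460):
 * the MSS option packs kind, length and value into a single word,
 *
 *	htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | 1460)
 *		== htonl(0x020405b4)	// kind 2, len 4, mss 0x05b4
 *
 * and the combined SACK-permitted + timestamp word is
 * htonl(0x0402080a): kind 4/len 2 followed by kind 8/len 10, with
 * TSVAL/TSECR in the next two words.
 */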

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->rtt_sample)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
	skb->h.th = th;
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th->source		= inet->sport;
	th->dest		= inet->dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(tp->rcv_wnd);
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
		th->urg			= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__u32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent);
	} else {
		tcp_build_and_update_options((__u32 *)(th + 1),
					     tp, tcb->when);
		TCP_ECN_send(sk, tp, skb, tcp_header_size);
	}

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (unlikely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	/* NET_XMIT_CN is special. It does not guarantee
	 * that this packet is lost. It tells us that the device
	 * is about to start dropping packets, or already drops
	 * some packets of the same priority, and invokes us
	 * to send less aggressively.
	 */
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}
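
/* Sketch of the doff/flags word written at byte offset 12 above,
 * assuming an established socket with timestamps enabled, so
 * tcp_header_size = 20 + 12 = 32:
 *
 *	*(((__u16 *)th) + 6) = htons(((32 >> 2) << 12) | tcb->flags);
 *				// data offset 8 (32-bit words) | flag bits
 */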


/* This routine just queues the buffer.
 *
 * NOTE: the probe0 timer is not checked; do not forget
 * tcp_push_pending_frames, otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now ||
	    !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}
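
/* Worked example of the rounding above (assumed values): for
 * skb->len = 4000 and mss_now = 1460 the divide rounds up,
 *
 *	factor = (4000 + 1459) / 1460 = 3
 *
 * so the NIC is told to cut three segments of tso_size 1460 (the last
 * one short), matching what tcp_skb_pcount() will report.
 */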

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */
	sk_charge_skb(sk, buff);

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the when field of
	 * skbs which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			/* Adjust Reno SACK estimate. */
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}
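
/* Note that the split can raise packets_out (assumed example): a 4380
 * byte skb counted as 3 segments (mss 1460), fragmented at len = 1000,
 * leaves pcounts of 1 and ceil(3380/1460) = 3, so
 *
 *	diff = old_factor - tcp_skb_pcount(skb) - tcp_skb_pcount(buff)
 *	     = 3 - 1 - 3 = -1
 *
 * and tp->packets_out -= diff actually increases packets_out by one;
 * this only happens for data already sent, per the snd_nxt test above.
 */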

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that the pulled data is not copied,
 * but immediately discarded.
 */
static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
	return skb->tail;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	if (len <= skb_headlen(skb)) {
		__skb_pull(skb, len);
	} else {
		if (__pskb_trim_head(skb, len - skb_headlen(skb)) == NULL)
			return -ENOMEM;
	}

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of the tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate the base mss without TCP options:
	 * it is MMS_S - sizeof(tcphdr) of RFC 1122.
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for a full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}
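
/* Worked example (assumed IPv4 path, timestamps on, no extension
 * headers): for pmtu = 1500,
 *
 *	1500 - 20 (IP) - 20 (TCP)		= 1460
 *	- 0 (icsk_ext_hdr_len)			= 1460
 *	- (tcp_header_len - 20) = - 12		= 1448
 *
 * so a full-sized segment carries 1448 bytes of user data once the
 * 12-byte timestamp option rides in every packet.
 */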

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes the snd mss to the current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
 * NOT account for TCP options; it assumes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
 * It is the minimum of user_mss and the mss received with the SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account the current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that the advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function.		--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed &&
	    (sk->sk_route_caps & NETIF_F_TSO) &&
	    !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}
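
/* Worked example of the TSO goal above (assumed IPv4 with timestamps,
 * so mss_now = 1448, tcp_header_len = 32, no SACK blocks pending):
 *
 *	xmit_size_goal = 65535 - 20 - 0 - 32	= 65483
 *	65483 - (65483 % 1448)			= 65160
 *
 * i.e. the largest multiple of the current mss (45 segments) that
 * still fits a 64K super-packet, before any max_window clamp.
 */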

/* Congestion window validation. (RFC2861) */

static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}
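
/* Example (assumed numbers): with snd_cwnd = 10 and 7 packets in
 * flight, tcp_cwnd_test() returns a quota of 3 segments.  A FIN-bearing
 * skb always gets a quota of 1, since the final FIN is exempt from the
 * congestion window check above.
 */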

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     skb_shinfo(skb)->tso_size != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if a packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains a FIN. (already checked by the caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames which sit in the middle
	 * of the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).  */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}
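
/* Decision sketch for the Nagle tests above (assumed mss = 1460):
 *
 *	1460-byte skb, nonagle = 0		-> send (full sized)
 *	100-byte skb, TCP_NODELAY set		-> send (nonagle != 0, not corked)
 *	100-byte skb, packets_out != 0 and an
 *	  unacked small segment outstanding	-> hold (tcp_minshall_check())
 *	100-byte skb, nothing in flight		-> send
 */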

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}
1010c1b4a7e6SDavid S. Miller 
1011c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1012c1b4a7e6SDavid S. Miller  * which is put after SKB on the list.  It is very much like
1013c1b4a7e6SDavid S. Miller  * tcp_fragment() except that it may make several kinds of assumptions
1014c1b4a7e6SDavid S. Miller  * in order to speed up the splitting operation.  In particular, we
1015c1b4a7e6SDavid S. Miller  * know that all the data is in scatter-gather pages, and that the
1016c1b4a7e6SDavid S. Miller  * packet has never been sent out before (and thus is not cloned).
1017c1b4a7e6SDavid S. Miller  */
1018846998aeSDavid S. Miller static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
1019c1b4a7e6SDavid S. Miller {
1020c1b4a7e6SDavid S. Miller 	struct sk_buff *buff;
1021c1b4a7e6SDavid S. Miller 	int nlen = skb->len - len;
1022c1b4a7e6SDavid S. Miller 	u16 flags;
1023c1b4a7e6SDavid S. Miller 
1024c1b4a7e6SDavid S. Miller 	/* All of a TSO frame must be composed of paged data.  */
1025c8ac3774SHerbert Xu 	if (skb->len != skb->data_len)
1026c8ac3774SHerbert Xu 		return tcp_fragment(sk, skb, len, mss_now);
1027c1b4a7e6SDavid S. Miller 
1028c1b4a7e6SDavid S. Miller 	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
1029c1b4a7e6SDavid S. Miller 	if (unlikely(buff == NULL))
1030c1b4a7e6SDavid S. Miller 		return -ENOMEM;
1031c1b4a7e6SDavid S. Miller 
1032c1b4a7e6SDavid S. Miller 	buff->truesize = nlen;
1033c1b4a7e6SDavid S. Miller 	skb->truesize -= nlen;
1034c1b4a7e6SDavid S. Miller 
1035c1b4a7e6SDavid S. Miller 	/* Correct the sequence numbers. */
1036c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1037c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1038c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1039c1b4a7e6SDavid S. Miller 
1040c1b4a7e6SDavid S. Miller 	/* PSH and FIN should only be set in the second packet. */
1041c1b4a7e6SDavid S. Miller 	flags = TCP_SKB_CB(skb)->flags;
1042c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
1043c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->flags = flags;
1044c1b4a7e6SDavid S. Miller 
1045c1b4a7e6SDavid S. Miller 	/* This packet was never sent out yet, so no SACK bits. */
1046c1b4a7e6SDavid S. Miller 	TCP_SKB_CB(buff)->sacked = 0;
1047c1b4a7e6SDavid S. Miller 
1048c1b4a7e6SDavid S. Miller 	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
1049c1b4a7e6SDavid S. Miller 	skb_split(skb, buff, len);
1050c1b4a7e6SDavid S. Miller 
1051c1b4a7e6SDavid S. Miller 	/* Fix up tso_factor for both original and new SKB.  */
1052846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, skb, mss_now);
1053846998aeSDavid S. Miller 	tcp_set_skb_tso_segs(sk, buff, mss_now);
1054c1b4a7e6SDavid S. Miller 
1055c1b4a7e6SDavid S. Miller 	/* Link BUFF into the send queue. */
1056c1b4a7e6SDavid S. Miller 	skb_header_release(buff);
10578728b834SDavid S. Miller 	__skb_append(skb, buff, &sk->sk_write_queue);
1058c1b4a7e6SDavid S. Miller 
1059c1b4a7e6SDavid S. Miller 	return 0;
1060c1b4a7e6SDavid S. Miller }
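/* Sequence bookkeeping sketch for the split above (illustrative
 * numbers): splitting an skb covering [1000, 5380) at len == 2920
 * leaves the original skb as [1000, 3920) and buff as [3920, 5380);
 * PSH and FIN, if set, travel with buff, the later half.
 */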
1061c1b4a7e6SDavid S. Miller 
1062c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount
1063c1b4a7e6SDavid S. Miller  * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1064c1b4a7e6SDavid S. Miller  *
1065c1b4a7e6SDavid S. Miller  * This algorithm is from John Heffner.
1066c1b4a7e6SDavid S. Miller  */
1067c1b4a7e6SDavid S. Miller static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
1068c1b4a7e6SDavid S. Miller {
10696687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
1070c1b4a7e6SDavid S. Miller 	u32 send_win, cong_win, limit, in_flight;
1071c1b4a7e6SDavid S. Miller 
1072c1b4a7e6SDavid S. Miller 	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
1073c1b4a7e6SDavid S. Miller 		return 0;
1074c1b4a7e6SDavid S. Miller 
10756687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Open)
1076908a75c1SDavid S. Miller 		return 0;
1077908a75c1SDavid S. Miller 
1078c1b4a7e6SDavid S. Miller 	in_flight = tcp_packets_in_flight(tp);
1079c1b4a7e6SDavid S. Miller 
1080c1b4a7e6SDavid S. Miller 	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
1081c1b4a7e6SDavid S. Miller 	       (tp->snd_cwnd <= in_flight));
1082c1b4a7e6SDavid S. Miller 
1083c1b4a7e6SDavid S. Miller 	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;
1084c1b4a7e6SDavid S. Miller 
1085c1b4a7e6SDavid S. Miller 	/* From in_flight test above, we know that cwnd > in_flight.  */
1086c1b4a7e6SDavid S. Miller 	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1087c1b4a7e6SDavid S. Miller 
1088c1b4a7e6SDavid S. Miller 	limit = min(send_win, cong_win);
1089c1b4a7e6SDavid S. Miller 
1090ba244fe9SDavid S. Miller 	/* If a full-sized TSO skb can be sent, do it. */
1091ba244fe9SDavid S. Miller 	if (limit >= 65536)
1092ba244fe9SDavid S. Miller 		return 0;
1093ba244fe9SDavid S. Miller 
1094c1b4a7e6SDavid S. Miller 	if (sysctl_tcp_tso_win_divisor) {
1095c1b4a7e6SDavid S. Miller 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1096c1b4a7e6SDavid S. Miller 
1097c1b4a7e6SDavid S. Miller 		/* If at least some fraction of a window is available,
1098c1b4a7e6SDavid S. Miller 		 * just use it.
1099c1b4a7e6SDavid S. Miller 		 */
1100c1b4a7e6SDavid S. Miller 		chunk /= sysctl_tcp_tso_win_divisor;
1101c1b4a7e6SDavid S. Miller 		if (limit >= chunk)
1102c1b4a7e6SDavid S. Miller 			return 0;
1103c1b4a7e6SDavid S. Miller 	} else {
1104c1b4a7e6SDavid S. Miller 		/* Different approach, try not to defer past a single
1105c1b4a7e6SDavid S. Miller 		 * ACK.  Receiver should ACK every other full sized
1106c1b4a7e6SDavid S. Miller 		 * frame, so if we have space for more than 3 frames
1107c1b4a7e6SDavid S. Miller 		 * then send now.
1108c1b4a7e6SDavid S. Miller 		 */
1109c1b4a7e6SDavid S. Miller 		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1110c1b4a7e6SDavid S. Miller 			return 0;
1111c1b4a7e6SDavid S. Miller 	}
1112c1b4a7e6SDavid S. Miller 
1113c1b4a7e6SDavid S. Miller 	/* Ok, it looks like it is advisable to defer.  */
1114c1b4a7e6SDavid S. Miller 	return 1;
1115c1b4a7e6SDavid S. Miller }
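/* Worked example (illustrative numbers): mss_cache == 1460,
 * snd_cwnd == 20 and in_flight == 18 give cong_win == 2 * 1460 ==
 * 2920; with a wide-open send window, limit == 2920.  Under the
 * default tso_win_divisor of 3 and snd_wnd == 65535, chunk ==
 * min(65535, 20 * 1460) / 3 == 9733, and since 2920 < 9733 we defer,
 * hoping further ACKs allow a larger TSO burst later.
 */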
1116c1b4a7e6SDavid S. Miller 
11175d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
11185d424d5aSJohn Heffner  * Returns 0 if we should wait to probe (no cwnd available),
11195d424d5aSJohn Heffner  *         1 if a probe was sent,
11205d424d5aSJohn Heffner  *         -1 otherwise */
11215d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
11225d424d5aSJohn Heffner {
11235d424d5aSJohn Heffner 	struct tcp_sock *tp = tcp_sk(sk);
11245d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
11255d424d5aSJohn Heffner 	struct sk_buff *skb, *nskb, *next;
11265d424d5aSJohn Heffner 	int len;
11275d424d5aSJohn Heffner 	int probe_size;
11285d424d5aSJohn Heffner 	unsigned int pif;
11295d424d5aSJohn Heffner 	int copy;
11305d424d5aSJohn Heffner 	int mss_now;
11315d424d5aSJohn Heffner 
11325d424d5aSJohn Heffner 	/* Not currently probing/verifying,
11335d424d5aSJohn Heffner 	 * not in recovery,
11345d424d5aSJohn Heffner 	 * have enough cwnd, and
11355d424d5aSJohn Heffner 	 * not SACKing (the variable headers throw things off) */
11365d424d5aSJohn Heffner 	if (!icsk->icsk_mtup.enabled ||
11375d424d5aSJohn Heffner 	    icsk->icsk_mtup.probe_size ||
11385d424d5aSJohn Heffner 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
11395d424d5aSJohn Heffner 	    tp->snd_cwnd < 11 ||
11405d424d5aSJohn Heffner 	    tp->rx_opt.eff_sacks)
11415d424d5aSJohn Heffner 		return -1;
11425d424d5aSJohn Heffner 
11435d424d5aSJohn Heffner 	/* Very simple search strategy: just double the MSS. */
11445d424d5aSJohn Heffner 	mss_now = tcp_current_mss(sk, 0);
11455d424d5aSJohn Heffner 	probe_size = 2*tp->mss_cache;
11465d424d5aSJohn Heffner 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
11475d424d5aSJohn Heffner 		/* TODO: set timer for probe_converge_event */
11485d424d5aSJohn Heffner 		return -1;
11495d424d5aSJohn Heffner 	}
11505d424d5aSJohn Heffner 
11515d424d5aSJohn Heffner 	/* Have enough data in the send queue to probe? */
11525d424d5aSJohn Heffner 	len = 0;
11535d424d5aSJohn Heffner 	if ((skb = sk->sk_send_head) == NULL)
11545d424d5aSJohn Heffner 		return -1;
11555d424d5aSJohn Heffner 	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
11565d424d5aSJohn Heffner 		skb = skb->next;
11575d424d5aSJohn Heffner 	if (len < probe_size)
11585d424d5aSJohn Heffner 		return -1;
11595d424d5aSJohn Heffner 
11605d424d5aSJohn Heffner 	/* Receive window check. */
11615d424d5aSJohn Heffner 	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
11625d424d5aSJohn Heffner 		if (tp->snd_wnd < probe_size)
11635d424d5aSJohn Heffner 			return -1;
11645d424d5aSJohn Heffner 		else
11655d424d5aSJohn Heffner 			return 0;
11665d424d5aSJohn Heffner 	}
11675d424d5aSJohn Heffner 
11685d424d5aSJohn Heffner 	/* Do we need to wait to drain cwnd? */
11695d424d5aSJohn Heffner 	pif = tcp_packets_in_flight(tp);
11705d424d5aSJohn Heffner 	if (pif + 2 > tp->snd_cwnd) {
11715d424d5aSJohn Heffner 		/* With no packets in flight, don't stall. */
11725d424d5aSJohn Heffner 		if (pif == 0)
11735d424d5aSJohn Heffner 			return -1;
11745d424d5aSJohn Heffner 		else
11755d424d5aSJohn Heffner 			return 0;
11765d424d5aSJohn Heffner 	}
11775d424d5aSJohn Heffner 
11785d424d5aSJohn Heffner 	/* We're allowed to probe.  Build it now. */
11795d424d5aSJohn Heffner 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
11805d424d5aSJohn Heffner 		return -1;
11815d424d5aSJohn Heffner 	sk_charge_skb(sk, nskb);
11825d424d5aSJohn Heffner 
11835d424d5aSJohn Heffner 	skb = sk->sk_send_head;
11845d424d5aSJohn Heffner 	__skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
11855d424d5aSJohn Heffner 	sk->sk_send_head = nskb;
11865d424d5aSJohn Heffner 
11875d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
11885d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
11895d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
11905d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->sacked = 0;
11915d424d5aSJohn Heffner 	nskb->csum = 0;
11925d424d5aSJohn Heffner 	if (skb->ip_summed == CHECKSUM_HW)
11935d424d5aSJohn Heffner 		nskb->ip_summed = CHECKSUM_HW;
11945d424d5aSJohn Heffner 
11955d424d5aSJohn Heffner 	len = 0;
11965d424d5aSJohn Heffner 	while (len < probe_size) {
11975d424d5aSJohn Heffner 		next = skb->next;
11985d424d5aSJohn Heffner 
11995d424d5aSJohn Heffner 		copy = min_t(int, skb->len, probe_size - len);
12005d424d5aSJohn Heffner 		if (nskb->ip_summed)
12015d424d5aSJohn Heffner 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
12025d424d5aSJohn Heffner 		else
12035d424d5aSJohn Heffner 			nskb->csum = skb_copy_and_csum_bits(skb, 0,
12045d424d5aSJohn Heffner 			                 skb_put(nskb, copy), copy, nskb->csum);
12055d424d5aSJohn Heffner 
12065d424d5aSJohn Heffner 		if (skb->len <= copy) {
12075d424d5aSJohn Heffner 			/* We've eaten all the data from this skb.
12085d424d5aSJohn Heffner 			 * Throw it away. */
12095d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
12105d424d5aSJohn Heffner 			__skb_unlink(skb, &sk->sk_write_queue);
12115d424d5aSJohn Heffner 			sk_stream_free_skb(sk, skb);
12125d424d5aSJohn Heffner 		} else {
12135d424d5aSJohn Heffner 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
12145d424d5aSJohn Heffner 			                           ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
12155d424d5aSJohn Heffner 			if (!skb_shinfo(skb)->nr_frags) {
12165d424d5aSJohn Heffner 				skb_pull(skb, copy);
12175d424d5aSJohn Heffner 				if (skb->ip_summed != CHECKSUM_HW)
12185d424d5aSJohn Heffner 					skb->csum = csum_partial(skb->data, skb->len, 0);
12195d424d5aSJohn Heffner 			} else {
12205d424d5aSJohn Heffner 				__pskb_trim_head(skb, copy);
12215d424d5aSJohn Heffner 				tcp_set_skb_tso_segs(sk, skb, mss_now);
12225d424d5aSJohn Heffner 			}
12235d424d5aSJohn Heffner 			TCP_SKB_CB(skb)->seq += copy;
12245d424d5aSJohn Heffner 		}
12255d424d5aSJohn Heffner 
12265d424d5aSJohn Heffner 		len += copy;
12275d424d5aSJohn Heffner 		skb = next;
12285d424d5aSJohn Heffner 	}
12295d424d5aSJohn Heffner 	tcp_init_tso_segs(sk, nskb, nskb->len);
12305d424d5aSJohn Heffner 
12315d424d5aSJohn Heffner 	/* We're ready to send.  If this fails, the probe will
12325d424d5aSJohn Heffner 	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
12335d424d5aSJohn Heffner 	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
12345d424d5aSJohn Heffner 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
12355d424d5aSJohn Heffner 		/* Decrement cwnd here because we are sending
12365d424d5aSJohn Heffner 		 * effectively two packets. */
12375d424d5aSJohn Heffner 		tp->snd_cwnd--;
12385d424d5aSJohn Heffner 		update_send_head(sk, tp, nskb);
12395d424d5aSJohn Heffner 
12405d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1241*0e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1242*0e7b1368SJohn Heffner 		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
12435d424d5aSJohn Heffner 
12445d424d5aSJohn Heffner 		return 1;
12455d424d5aSJohn Heffner 	}
12465d424d5aSJohn Heffner 
12475d424d5aSJohn Heffner 	return -1;
12485d424d5aSJohn Heffner }
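/* Probe sizing sketch (illustrative numbers): with mss_cache == 1460
 * the probe covers probe_size == 2920 bytes, i.e. two current-MSS
 * segments sent as one larger packet.  Whether the probe is ACKed or
 * lost, the MTU probing state machine elsewhere narrows the
 * icsk_mtup.search_low/search_high bounds toward the true path MTU.
 */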
12495d424d5aSJohn Heffner 
12505d424d5aSJohn Heffner 
12511da177e4SLinus Torvalds /* This routine writes packets to the network.  It advances the
12521da177e4SLinus Torvalds  * send_head.  This happens as incoming acks open up the remote
12531da177e4SLinus Torvalds  * window for us.
12541da177e4SLinus Torvalds  *
12551da177e4SLinus Torvalds  * Returns 1, if no segments are in flight and we have queued segments, but
12561da177e4SLinus Torvalds  * cannot send anything now because of SWS or another problem.
12571da177e4SLinus Torvalds  */
1258a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
12591da177e4SLinus Torvalds {
12601da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
126192df7b51SDavid S. Miller 	struct sk_buff *skb;
1262c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, sent_pkts;
1263c1b4a7e6SDavid S. Miller 	int cwnd_quota;
12645d424d5aSJohn Heffner 	int result;
12651da177e4SLinus Torvalds 
12661da177e4SLinus Torvalds 	/* If we are closed, the bytes will have to remain here.
12671da177e4SLinus Torvalds 	 * In time closedown will finish, we empty the write queue and all
12681da177e4SLinus Torvalds 	 * will be happy.
12691da177e4SLinus Torvalds 	 */
127092df7b51SDavid S. Miller 	if (unlikely(sk->sk_state == TCP_CLOSE))
127192df7b51SDavid S. Miller 		return 0;
127292df7b51SDavid S. Miller 
1273c1b4a7e6SDavid S. Miller 	sent_pkts = 0;
12745d424d5aSJohn Heffner 
12755d424d5aSJohn Heffner 	/* Do MTU probing. */
12765d424d5aSJohn Heffner 	if ((result = tcp_mtu_probe(sk)) == 0) {
12775d424d5aSJohn Heffner 		return 0;
12785d424d5aSJohn Heffner 	} else if (result > 0) {
12795d424d5aSJohn Heffner 		sent_pkts = 1;
12805d424d5aSJohn Heffner 	}
12815d424d5aSJohn Heffner 
1282b68e9f85SHerbert Xu 	while ((skb = sk->sk_send_head)) {
1283c8ac3774SHerbert Xu 		unsigned int limit;
1284c8ac3774SHerbert Xu 
1285b68e9f85SHerbert Xu 		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1286c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1287c1b4a7e6SDavid S. Miller 
1288b68e9f85SHerbert Xu 		cwnd_quota = tcp_cwnd_test(tp, skb);
1289b68e9f85SHerbert Xu 		if (!cwnd_quota)
1290b68e9f85SHerbert Xu 			break;
1291b68e9f85SHerbert Xu 
1292b68e9f85SHerbert Xu 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1293b68e9f85SHerbert Xu 			break;
1294b68e9f85SHerbert Xu 
1295c1b4a7e6SDavid S. Miller 		if (tso_segs == 1) {
1296aa93466bSDavid S. Miller 			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1297aa93466bSDavid S. Miller 						     (tcp_skb_is_last(sk, skb) ?
1298aa93466bSDavid S. Miller 						      nonagle : TCP_NAGLE_PUSH))))
1299aa93466bSDavid S. Miller 				break;
1300c1b4a7e6SDavid S. Miller 		} else {
1301c1b4a7e6SDavid S. Miller 			if (tcp_tso_should_defer(sk, tp, skb))
1302aa93466bSDavid S. Miller 				break;
1303c1b4a7e6SDavid S. Miller 		}
1304aa93466bSDavid S. Miller 
1305c8ac3774SHerbert Xu 		limit = mss_now;
1306c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1307c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1308c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1309c1b4a7e6SDavid S. Miller 
1310c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1311c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1312c1b4a7e6SDavid S. Miller 
1313c1b4a7e6SDavid S. Miller 				if (trim)
1314c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1315c1b4a7e6SDavid S. Miller 			}
1316c1b4a7e6SDavid S. Miller 		}
1317c8ac3774SHerbert Xu 
1318c8ac3774SHerbert Xu 		if (skb->len > limit &&
1319c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
13201da177e4SLinus Torvalds 			break;
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1323c1b4a7e6SDavid S. Miller 
1324dfb4b9dcSDavid S. Miller 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
13251da177e4SLinus Torvalds 			break;
13261da177e4SLinus Torvalds 
13271da177e4SLinus Torvalds 		/* Advance the send_head.  This one is sent out.
13281da177e4SLinus Torvalds 		 * This call will increment packets_out.
13291da177e4SLinus Torvalds 		 */
13301da177e4SLinus Torvalds 		update_send_head(sk, tp, skb);
13311da177e4SLinus Torvalds 
13321da177e4SLinus Torvalds 		tcp_minshall_update(tp, mss_now, skb);
1333aa93466bSDavid S. Miller 		sent_pkts++;
13341da177e4SLinus Torvalds 	}
13351da177e4SLinus Torvalds 
1336aa93466bSDavid S. Miller 	if (likely(sent_pkts)) {
13371da177e4SLinus Torvalds 		tcp_cwnd_validate(sk, tp);
13381da177e4SLinus Torvalds 		return 0;
13391da177e4SLinus Torvalds 	}
13401da177e4SLinus Torvalds 	return !tp->packets_out && sk->sk_send_head;
13411da177e4SLinus Torvalds }
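/* TSO bounding example for the loop above (illustrative numbers):
 * with mss_now == 1460, a 10000-byte skb and tcp_window_allows()
 * returning 20000, skb->len < limit, so limit is trimmed by
 * skb->len % mss_now (1240 bytes) down to 8760; skb->len > limit then
 * triggers tso_fragment(), keeping the frame put on the wire an exact
 * multiple of the MSS while the 1240-byte tail waits in the queue.
 */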
13421da177e4SLinus Torvalds 
1343a762a980SDavid S. Miller /* Push out any pending frames which were held back due to
1344a762a980SDavid S. Miller  * TCP_CORK or attempt at coalescing tiny packets.
1345a762a980SDavid S. Miller  * The socket must be locked by the caller.
1346a762a980SDavid S. Miller  */
1347a762a980SDavid S. Miller void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
1348a2e2a59cSDavid S. Miller 			       unsigned int cur_mss, int nonagle)
1349a762a980SDavid S. Miller {
1350a762a980SDavid S. Miller 	struct sk_buff *skb = sk->sk_send_head;
1351a762a980SDavid S. Miller 
1352a762a980SDavid S. Miller 	if (skb) {
135355c97f3eSDavid S. Miller 		if (tcp_write_xmit(sk, cur_mss, nonagle))
1354a762a980SDavid S. Miller 			tcp_check_probe_timer(sk, tp);
1355a762a980SDavid S. Miller 	}
1356a762a980SDavid S. Miller }
1357a762a980SDavid S. Miller 
1358c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires
1359c1b4a7e6SDavid S. Miller  * true push pending frames to setup probe timer etc.
1360c1b4a7e6SDavid S. Miller  */
1361c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now)
1362c1b4a7e6SDavid S. Miller {
1363c1b4a7e6SDavid S. Miller 	struct tcp_sock *tp = tcp_sk(sk);
1364c1b4a7e6SDavid S. Miller 	struct sk_buff *skb = sk->sk_send_head;
1365c1b4a7e6SDavid S. Miller 	unsigned int tso_segs, cwnd_quota;
1366c1b4a7e6SDavid S. Miller 
1367c1b4a7e6SDavid S. Miller 	BUG_ON(!skb || skb->len < mss_now);
1368c1b4a7e6SDavid S. Miller 
1369846998aeSDavid S. Miller 	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1370c1b4a7e6SDavid S. Miller 	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);
1371c1b4a7e6SDavid S. Miller 
1372c1b4a7e6SDavid S. Miller 	if (likely(cwnd_quota)) {
1373c8ac3774SHerbert Xu 		unsigned int limit;
1374c8ac3774SHerbert Xu 
1375c1b4a7e6SDavid S. Miller 		BUG_ON(!tso_segs);
1376c1b4a7e6SDavid S. Miller 
1377c8ac3774SHerbert Xu 		limit = mss_now;
1378c1b4a7e6SDavid S. Miller 		if (tso_segs > 1) {
1379c8ac3774SHerbert Xu 			limit = tcp_window_allows(tp, skb,
1380c1b4a7e6SDavid S. Miller 						  mss_now, cwnd_quota);
1381c1b4a7e6SDavid S. Miller 
1382c1b4a7e6SDavid S. Miller 			if (skb->len < limit) {
1383c1b4a7e6SDavid S. Miller 				unsigned int trim = skb->len % mss_now;
1384c1b4a7e6SDavid S. Miller 
1385c1b4a7e6SDavid S. Miller 				if (trim)
1386c1b4a7e6SDavid S. Miller 					limit = skb->len - trim;
1387c1b4a7e6SDavid S. Miller 			}
1388c1b4a7e6SDavid S. Miller 		}
1389c8ac3774SHerbert Xu 
1390c8ac3774SHerbert Xu 		if (skb->len > limit &&
1391c8ac3774SHerbert Xu 		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
1392c1b4a7e6SDavid S. Miller 			return;
1393c1b4a7e6SDavid S. Miller 
1394c1b4a7e6SDavid S. Miller 		/* Send it out now. */
1395c1b4a7e6SDavid S. Miller 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1396c1b4a7e6SDavid S. Miller 
1397dfb4b9dcSDavid S. Miller 		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
1398c1b4a7e6SDavid S. Miller 			update_send_head(sk, tp, skb);
1399c1b4a7e6SDavid S. Miller 			tcp_cwnd_validate(sk, tp);
1400c1b4a7e6SDavid S. Miller 			return;
1401c1b4a7e6SDavid S. Miller 		}
1402c1b4a7e6SDavid S. Miller 	}
1403c1b4a7e6SDavid S. Miller }
1404c1b4a7e6SDavid S. Miller 
14051da177e4SLinus Torvalds /* This function returns the amount that we can raise the
14061da177e4SLinus Torvalds  * usable window based on the following constraints
14071da177e4SLinus Torvalds  *
14081da177e4SLinus Torvalds  * 1. The window can never be shrunk once it is offered (RFC 793)
14091da177e4SLinus Torvalds  * 2. We limit memory per socket
14101da177e4SLinus Torvalds  *
14111da177e4SLinus Torvalds  * RFC 1122:
14121da177e4SLinus Torvalds  * "the suggested [SWS] avoidance algorithm for the receiver is to keep
14131da177e4SLinus Torvalds  *  RECV.NEXT + RCV.WIN fixed until:
14141da177e4SLinus Torvalds  *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
14151da177e4SLinus Torvalds  *
14161da177e4SLinus Torvalds  * i.e. don't raise the right edge of the window until you can raise
14171da177e4SLinus Torvalds  * it at least MSS bytes.
14181da177e4SLinus Torvalds  *
14191da177e4SLinus Torvalds  * Unfortunately, the recommended algorithm breaks header prediction,
14201da177e4SLinus Torvalds  * since header prediction assumes th->window stays fixed.
14211da177e4SLinus Torvalds  *
14221da177e4SLinus Torvalds  * Strictly speaking, keeping th->window fixed violates the receiver
14231da177e4SLinus Torvalds  * side SWS prevention criteria. The problem is that under this rule
14241da177e4SLinus Torvalds  * a stream of single byte packets will cause the right side of the
14251da177e4SLinus Torvalds  * window to always advance by a single byte.
14261da177e4SLinus Torvalds  *
14271da177e4SLinus Torvalds  * Of course, if the sender implements sender side SWS prevention
14281da177e4SLinus Torvalds  * then this will not be a problem.
14291da177e4SLinus Torvalds  *
14301da177e4SLinus Torvalds  * BSD seems to make the following compromise:
14311da177e4SLinus Torvalds  *
14321da177e4SLinus Torvalds  *	If the free space is less than the 1/4 of the maximum
14331da177e4SLinus Torvalds  *	space available and the free space is less than 1/2 mss,
14341da177e4SLinus Torvalds  *	then set the window to 0.
14351da177e4SLinus Torvalds  *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
14361da177e4SLinus Torvalds  *	Otherwise, just prevent the window from shrinking
14371da177e4SLinus Torvalds  *	and from being larger than the largest representable value.
14381da177e4SLinus Torvalds  *
14391da177e4SLinus Torvalds  * This prevents incremental opening of the window in the regime
14401da177e4SLinus Torvalds  * where TCP is limited by the speed of the reader side taking
14411da177e4SLinus Torvalds  * data out of the TCP receive queue. It does nothing about
14421da177e4SLinus Torvalds  * those cases where the window is constrained on the sender side
14431da177e4SLinus Torvalds  * because the pipeline is full.
14441da177e4SLinus Torvalds  *
14451da177e4SLinus Torvalds  * BSD also seems to "accidentally" limit itself to windows that are a
14461da177e4SLinus Torvalds  * multiple of MSS, at least until the free space gets quite small.
14471da177e4SLinus Torvalds  * This would appear to be a side effect of the mbuf implementation.
14481da177e4SLinus Torvalds  * Combining these two algorithms results in the observed behavior
14491da177e4SLinus Torvalds  * of having a fixed window size at almost all times.
14501da177e4SLinus Torvalds  *
14511da177e4SLinus Torvalds  * Below we obtain similar behavior by forcing the offered window to
14521da177e4SLinus Torvalds  * a multiple of the mss when it is feasible to do so.
14531da177e4SLinus Torvalds  *
14541da177e4SLinus Torvalds  * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
14551da177e4SLinus Torvalds  * Regular options like TIMESTAMP are taken into account.
14561da177e4SLinus Torvalds  */
14571da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
14581da177e4SLinus Torvalds {
1459463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
14601da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1461caa20d9aSStephen Hemminger 	/* MSS for the peer's data.  Previous versions used mss_clamp
14621da177e4SLinus Torvalds 	 * here.  I don't know if the value based on our guesses
14631da177e4SLinus Torvalds 	 * of the peer's MSS is better for performance.  It's more correct
14641da177e4SLinus Torvalds 	 * but may be worse for performance because of rcv_mss
14651da177e4SLinus Torvalds 	 * fluctuations.  --SAW  1998/11/1
14661da177e4SLinus Torvalds 	 */
1467463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
14681da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
14691da177e4SLinus Torvalds 	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
14701da177e4SLinus Torvalds 	int window;
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds 	if (mss > full_space)
14731da177e4SLinus Torvalds 		mss = full_space;
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds 	if (free_space < full_space/2) {
1476463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
14771da177e4SLinus Torvalds 
14781da177e4SLinus Torvalds 		if (tcp_memory_pressure)
14791da177e4SLinus Torvalds 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 		if (free_space < mss)
14821da177e4SLinus Torvalds 			return 0;
14831da177e4SLinus Torvalds 	}
14841da177e4SLinus Torvalds 
14851da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
14861da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
14871da177e4SLinus Torvalds 
14881da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
14891da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
14901da177e4SLinus Torvalds 	 */
14911da177e4SLinus Torvalds 	window = tp->rcv_wnd;
14921da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
14931da177e4SLinus Torvalds 		window = free_space;
14941da177e4SLinus Torvalds 
14951da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
14961da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
14971da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
14981da177e4SLinus Torvalds 		 */
14991da177e4SLinus Torvalds 		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
15001da177e4SLinus Torvalds 			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
15011da177e4SLinus Torvalds 				  << tp->rx_opt.rcv_wscale);
15021da177e4SLinus Torvalds 	} else {
15031da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
15041da177e4SLinus Torvalds 		 * Window clamp already applied above.
15051da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
15061da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
15071da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
15081da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
15091da177e4SLinus Torvalds 		 * is too small.
15101da177e4SLinus Torvalds 		 */
15111da177e4SLinus Torvalds 		if (window <= free_space - mss || window > free_space)
15121da177e4SLinus Torvalds 			window = (free_space/mss)*mss;
15131da177e4SLinus Torvalds 	}
15141da177e4SLinus Torvalds 
15151da177e4SLinus Torvalds 	return window;
15161da177e4SLinus Torvalds }
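/* Rounding examples for the above (illustrative numbers): without
 * window scaling, mss == 1460 and free_space == 10000 yield
 * window == (10000 / 1460) * 1460 == 8760, unless the previously
 * advertised window is already within one MSS of free_space.  With
 * rcv_wscale == 7, free_space == 10000 is rounded up to the next
 * multiple of 128, i.e. 10112, so the advertisement cannot collapse
 * to zero merely because the low-order bits are shifted away.
 */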
15171da177e4SLinus Torvalds 
15181da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */
15191da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
15221da177e4SLinus Torvalds 	struct sk_buff *next_skb = skb->next;
15231da177e4SLinus Torvalds 
15241da177e4SLinus Torvalds 	/* The first test we must make is that neither of these two
15251da177e4SLinus Torvalds 	 * SKB's are still referenced by someone else.
15261da177e4SLinus Torvalds 	 */
15271da177e4SLinus Torvalds 	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
15281da177e4SLinus Torvalds 		int skb_size = skb->len, next_skb_size = next_skb->len;
15291da177e4SLinus Torvalds 		u16 flags = TCP_SKB_CB(skb)->flags;
15301da177e4SLinus Torvalds 
15311da177e4SLinus Torvalds 		/* Also punt if next skb has been SACK'd. */
15321da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
15331da177e4SLinus Torvalds 			return;
15341da177e4SLinus Torvalds 
15351da177e4SLinus Torvalds 		/* Next skb is out of window. */
15361da177e4SLinus Torvalds 		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
15371da177e4SLinus Torvalds 			return;
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds 		/* Punt if not enough space exists in the first SKB for
15401da177e4SLinus Torvalds 		 * the data in the second, or the total combined payload
15411da177e4SLinus Torvalds 		 * would exceed the MSS.
15421da177e4SLinus Torvalds 		 */
15431da177e4SLinus Torvalds 		if ((next_skb_size > skb_tailroom(skb)) ||
15441da177e4SLinus Torvalds 		    ((skb_size + next_skb_size) > mss_now))
15451da177e4SLinus Torvalds 			return;
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 		BUG_ON(tcp_skb_pcount(skb) != 1 ||
15481da177e4SLinus Torvalds 		       tcp_skb_pcount(next_skb) != 1);
15491da177e4SLinus Torvalds 
15506a438bbeSStephen Hemminger 		/* changing transmit queue under us so clear hints */
15516a438bbeSStephen Hemminger 		clear_all_retrans_hints(tp);
15526a438bbeSStephen Hemminger 
15531da177e4SLinus Torvalds 		/* Ok.	We will be able to collapse the packet. */
15548728b834SDavid S. Miller 		__skb_unlink(next_skb, &sk->sk_write_queue);
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds 		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds 		if (next_skb->ip_summed == CHECKSUM_HW)
15591da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_HW;
15601da177e4SLinus Torvalds 
15611da177e4SLinus Torvalds 		if (skb->ip_summed != CHECKSUM_HW)
15621da177e4SLinus Torvalds 			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
15631da177e4SLinus Torvalds 
15641da177e4SLinus Torvalds 		/* Update sequence range on original skb. */
15651da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds 		/* Merge over control information. */
15681da177e4SLinus Torvalds 		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
15691da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = flags;
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds 		/* All done, get rid of second SKB and account for it so
15721da177e4SLinus Torvalds 		 * packet counting does not break.
15731da177e4SLinus Torvalds 		 */
15741da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
15751da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
15761da177e4SLinus Torvalds 			tp->retrans_out -= tcp_skb_pcount(next_skb);
15771da177e4SLinus Torvalds 		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
15781da177e4SLinus Torvalds 			tp->lost_out -= tcp_skb_pcount(next_skb);
15791da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
15801da177e4SLinus Torvalds 		}
15811da177e4SLinus Torvalds 		/* Reno case is special. Sigh... */
15821da177e4SLinus Torvalds 		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
15831da177e4SLinus Torvalds 			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
15841da177e4SLinus Torvalds 			tp->left_out -= tcp_skb_pcount(next_skb);
15851da177e4SLinus Torvalds 		}
15861da177e4SLinus Torvalds 
15871da177e4SLinus Torvalds 		/* Not quite right: it can be > snd.fack, but
15881da177e4SLinus Torvalds 		 * it is better to underestimate fackets.
15891da177e4SLinus Torvalds 		 */
15901da177e4SLinus Torvalds 		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
15911da177e4SLinus Torvalds 		tcp_packets_out_dec(tp, next_skb);
15921da177e4SLinus Torvalds 		sk_stream_free_skb(sk, next_skb);
15931da177e4SLinus Torvalds 	}
15941da177e4SLinus Torvalds }
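/* Collapse example (illustrative numbers): two queued 300-byte
 * segments covering [1000, 1300) and [1300, 1600) merge, tailroom
 * permitting, into a single [1000, 1600) segment when mss_now ==
 * 1460, so one retransmission replaces two; the lost_out, left_out
 * and retrans_out counters are adjusted above for the absorbed skb so
 * packet accounting stays consistent.
 */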
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in
15971da177e4SLinus Torvalds  * tcp_timer. This is used for path mtu discovery.
15981da177e4SLinus Torvalds  * The socket is already locked here.
15991da177e4SLinus Torvalds  */
16001da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk)
16011da177e4SLinus Torvalds {
16026687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
16031da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16041da177e4SLinus Torvalds 	struct sk_buff *skb;
16051da177e4SLinus Torvalds 	unsigned int mss = tcp_current_mss(sk, 0);
16061da177e4SLinus Torvalds 	int lost = 0;
16071da177e4SLinus Torvalds 
16081da177e4SLinus Torvalds 	sk_stream_for_retrans_queue(skb, sk) {
16091da177e4SLinus Torvalds 		if (skb->len > mss &&
16101da177e4SLinus Torvalds 		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
16111da177e4SLinus Torvalds 			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
16121da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
16131da177e4SLinus Torvalds 				tp->retrans_out -= tcp_skb_pcount(skb);
16141da177e4SLinus Torvalds 			}
16151da177e4SLinus Torvalds 			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
16161da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
16171da177e4SLinus Torvalds 				tp->lost_out += tcp_skb_pcount(skb);
16181da177e4SLinus Torvalds 				lost = 1;
16191da177e4SLinus Torvalds 			}
16201da177e4SLinus Torvalds 		}
16211da177e4SLinus Torvalds 	}
16221da177e4SLinus Torvalds 
16236a438bbeSStephen Hemminger 	clear_all_retrans_hints(tp);
16246a438bbeSStephen Hemminger 
16251da177e4SLinus Torvalds 	if (!lost)
16261da177e4SLinus Torvalds 		return;
16271da177e4SLinus Torvalds 
16281da177e4SLinus Torvalds 	tcp_sync_left_out(tp);
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 	/* Don't muck with the congestion window here.
16311da177e4SLinus Torvalds 	 * Reason is that we do not increase amount of _data_
16321da177e4SLinus Torvalds 	 * in network, but units changed and effective
16331da177e4SLinus Torvalds 	 * cwnd/ssthresh really reduced now.
16341da177e4SLinus Torvalds 	 */
16356687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Loss) {
16361da177e4SLinus Torvalds 		tp->high_seq = tp->snd_nxt;
16376687e988SArnaldo Carvalho de Melo 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
16381da177e4SLinus Torvalds 		tp->prior_ssthresh = 0;
16391da177e4SLinus Torvalds 		tp->undo_marker = 0;
16406687e988SArnaldo Carvalho de Melo 		tcp_set_ca_state(sk, TCP_CA_Loss);
16411da177e4SLinus Torvalds 	}
16421da177e4SLinus Torvalds 	tcp_xmit_retransmit_queue(sk);
16431da177e4SLinus Torvalds }
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds /* This retransmits one SKB.  Policy decisions and retransmit queue
16461da177e4SLinus Torvalds  * state updates are done by the caller.  Returns non-zero if an
16471da177e4SLinus Torvalds  * error occurred which prevented the send.
16481da177e4SLinus Torvalds  */
16491da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
16501da177e4SLinus Torvalds {
16511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
16525d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
16531da177e4SLinus Torvalds  	unsigned int cur_mss = tcp_current_mss(sk, 0);
16541da177e4SLinus Torvalds 	int err;
16551da177e4SLinus Torvalds 
16565d424d5aSJohn Heffner 	/* Inconclusive MTU probe */
16575d424d5aSJohn Heffner 	if (icsk->icsk_mtup.probe_size) {
16585d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
16595d424d5aSJohn Heffner 	}
16605d424d5aSJohn Heffner 
16611da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
1662caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
16631da177e4SLinus Torvalds 	 */
16641da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_wmem_alloc) >
16651da177e4SLinus Torvalds 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
16661da177e4SLinus Torvalds 		return -EAGAIN;
16671da177e4SLinus Torvalds 
16681da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
16691da177e4SLinus Torvalds 		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
16701da177e4SLinus Torvalds 			BUG();
16711da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
16721da177e4SLinus Torvalds 			return -ENOMEM;
16731da177e4SLinus Torvalds 	}
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds 	/* If the receiver has shrunk its window, and the skb is out of
16761da177e4SLinus Torvalds 	 * the new window, do not retransmit it. The exception is the
16771da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
16781da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
16791da177e4SLinus Torvalds 	 */
16801da177e4SLinus Torvalds 	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
16811da177e4SLinus Torvalds 	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
16821da177e4SLinus Torvalds 		return -EAGAIN;
16831da177e4SLinus Torvalds 
16841da177e4SLinus Torvalds 	if (skb->len > cur_mss) {
1685846998aeSDavid S. Miller 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
16861da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later. */
16871da177e4SLinus Torvalds 	}
16881da177e4SLinus Torvalds 
16891da177e4SLinus Torvalds 	/* Collapse two adjacent packets if worthwhile and we can. */
16901da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
16911da177e4SLinus Torvalds 	   (skb->len < (cur_mss >> 1)) &&
16921da177e4SLinus Torvalds 	   (skb->next != sk->sk_send_head) &&
16931da177e4SLinus Torvalds 	   (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
16941da177e4SLinus Torvalds 	   (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
16951da177e4SLinus Torvalds 	   (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
16961da177e4SLinus Torvalds 	   (sysctl_tcp_retrans_collapse != 0))
16971da177e4SLinus Torvalds 		tcp_retrans_try_collapse(sk, skb, cur_mss);
16981da177e4SLinus Torvalds 
16998292a17aSArnaldo Carvalho de Melo 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
17001da177e4SLinus Torvalds 		return -EHOSTUNREACH; /* Routing failure or similar. */
17011da177e4SLinus Torvalds 
17021da177e4SLinus Torvalds 	/* Some Solaris stacks overoptimize and ignore the FIN on a
17031da177e4SLinus Torvalds 	 * retransmit when old data is attached.  So strip it off
17041da177e4SLinus Torvalds 	 * since it is cheap to do so and saves bytes on the network.
17051da177e4SLinus Torvalds 	 */
17061da177e4SLinus Torvalds 	if (skb->len > 0 &&
17071da177e4SLinus Torvalds 	   (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
17081da177e4SLinus Torvalds 	   tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
17091da177e4SLinus Torvalds 		if (!pskb_trim(skb, 0)) {
17101da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
17111da177e4SLinus Torvalds 			skb_shinfo(skb)->tso_segs = 1;
17121da177e4SLinus Torvalds 			skb_shinfo(skb)->tso_size = 0;
17131da177e4SLinus Torvalds 			skb->ip_summed = CHECKSUM_NONE;
17141da177e4SLinus Torvalds 			skb->csum = 0;
17151da177e4SLinus Torvalds 		}
17161da177e4SLinus Torvalds 	}
17171da177e4SLinus Torvalds 
17181da177e4SLinus Torvalds 	/* Make a copy, if the first transmission SKB clone we made
17191da177e4SLinus Torvalds 	 * is still in somebody's hands, else make a clone.
17201da177e4SLinus Torvalds 	 */
17211da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
17221da177e4SLinus Torvalds 
1723dfb4b9dcSDavid S. Miller 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 	if (err == 0) {
17261da177e4SLinus Torvalds 		/* Update global TCP statistics. */
17271da177e4SLinus Torvalds 		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);
17281da177e4SLinus Torvalds 
17291da177e4SLinus Torvalds 		tp->total_retrans++;
17301da177e4SLinus Torvalds 
17311da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
17321da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
17331da177e4SLinus Torvalds 			if (net_ratelimit())
17341da177e4SLinus Torvalds 				printk(KERN_DEBUG "retrans_out leaked.\n");
17351da177e4SLinus Torvalds 		}
17361da177e4SLinus Torvalds #endif
17371da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
17381da177e4SLinus Torvalds 		tp->retrans_out += tcp_skb_pcount(skb);
17391da177e4SLinus Torvalds 
17401da177e4SLinus Torvalds 		/* Save stamp of the first retransmit. */
17411da177e4SLinus Torvalds 		if (!tp->retrans_stamp)
17421da177e4SLinus Torvalds 			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
17431da177e4SLinus Torvalds 
17441da177e4SLinus Torvalds 		tp->undo_retrans++;
17451da177e4SLinus Torvalds 
17461da177e4SLinus Torvalds 		/* snd_nxt is stored to detect loss of retransmitted segment,
17471da177e4SLinus Torvalds 		 * see tcp_input.c tcp_sacktag_write_queue().
17481da177e4SLinus Torvalds 		 */
17491da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
17501da177e4SLinus Torvalds 	}
17511da177e4SLinus Torvalds 	return err;
17521da177e4SLinus Torvalds }
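/* Illustration of the sk_wmem_alloc guard above (assumed numbers):
 * with sk_wmem_queued == 80000 and sk_sndbuf == 120000 the cap is
 * min(80000 + 20000, 120000) == 100000, so retransmits back off with
 * -EAGAIN once roughly a quarter of extra allocation headroom has
 * been eaten by fragmentation, tunneling, mangling and the like.
 */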
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
17551da177e4SLinus Torvalds  * retransmitted data is acknowledged.  It tries to continue
17561da177e4SLinus Torvalds  * resending the rest of the retransmit queue, until either
17571da177e4SLinus Torvalds  * we've sent it all or the congestion window limit is reached.
17581da177e4SLinus Torvalds  * If doing SACK, the first ACK which comes back for a timeout
17591da177e4SLinus Torvalds  * based retransmit packet might feed us FACK information again.
17601da177e4SLinus Torvalds  * If so, we use it to avoid unnecessary retransmissions.
17611da177e4SLinus Torvalds  */
17621da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
17631da177e4SLinus Torvalds {
17646687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
17651da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17661da177e4SLinus Torvalds 	struct sk_buff *skb;
17676a438bbeSStephen Hemminger 	int packet_cnt;
17686a438bbeSStephen Hemminger 
17696a438bbeSStephen Hemminger 	if (tp->retransmit_skb_hint) {
17706a438bbeSStephen Hemminger 		skb = tp->retransmit_skb_hint;
17716a438bbeSStephen Hemminger 		packet_cnt = tp->retransmit_cnt_hint;
17726a438bbeSStephen Hemminger 	} else {
17736a438bbeSStephen Hemminger 		skb = sk->sk_write_queue.next;
17746a438bbeSStephen Hemminger 		packet_cnt = 0;
17756a438bbeSStephen Hemminger 	}
17761da177e4SLinus Torvalds 
17771da177e4SLinus Torvalds 	/* First pass: retransmit lost packets. */
17786a438bbeSStephen Hemminger 	if (tp->lost_out) {
17796a438bbeSStephen Hemminger 		sk_stream_for_retrans_queue_from(skb, sk) {
17801da177e4SLinus Torvalds 			__u8 sacked = TCP_SKB_CB(skb)->sacked;
17811da177e4SLinus Torvalds 
17826a438bbeSStephen Hemminger 			/* we could do better than to assign each time */
17836a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
17846a438bbeSStephen Hemminger 			tp->retransmit_cnt_hint = packet_cnt;
17856a438bbeSStephen Hemminger 
17861da177e4SLinus Torvalds 			/* Assume this retransmit will generate
17871da177e4SLinus Torvalds 			 * only one packet for congestion window
17881da177e4SLinus Torvalds 			 * calculation purposes.  This works because
17891da177e4SLinus Torvalds 			 * tcp_retransmit_skb() will chop up the
17901da177e4SLinus Torvalds 			 * packet to be MSS sized and all the
17911da177e4SLinus Torvalds 			 * packet counting works out.
17921da177e4SLinus Torvalds 			 */
17931da177e4SLinus Torvalds 			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
17941da177e4SLinus Torvalds 				return;
17951da177e4SLinus Torvalds 
17961da177e4SLinus Torvalds 			if (sacked & TCPCB_LOST) {
17971da177e4SLinus Torvalds 				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
17986a438bbeSStephen Hemminger 					if (tcp_retransmit_skb(sk, skb)) {
17996a438bbeSStephen Hemminger 						tp->retransmit_skb_hint = NULL;
18001da177e4SLinus Torvalds 						return;
18016a438bbeSStephen Hemminger 					}
18026687e988SArnaldo Carvalho de Melo 					if (icsk->icsk_ca_state != TCP_CA_Loss)
18031da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
18041da177e4SLinus Torvalds 					else
18051da177e4SLinus Torvalds 						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
18061da177e4SLinus Torvalds 
18071da177e4SLinus Torvalds 					if (skb ==
18081da177e4SLinus Torvalds 					    skb_peek(&sk->sk_write_queue))
1809463c84b9SArnaldo Carvalho de Melo 						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
18103f421baaSArnaldo Carvalho de Melo 									  inet_csk(sk)->icsk_rto,
18113f421baaSArnaldo Carvalho de Melo 									  TCP_RTO_MAX);
18121da177e4SLinus Torvalds 				}
18131da177e4SLinus Torvalds 
18146a438bbeSStephen Hemminger 				packet_cnt += tcp_skb_pcount(skb);
18156a438bbeSStephen Hemminger 				if (packet_cnt >= tp->lost_out)
18161da177e4SLinus Torvalds 					break;
18171da177e4SLinus Torvalds 			}
18181da177e4SLinus Torvalds 		}
18191da177e4SLinus Torvalds 	}
18201da177e4SLinus Torvalds 
18211da177e4SLinus Torvalds 	/* OK, demanded retransmission is finished. */
18221da177e4SLinus Torvalds 
18231da177e4SLinus Torvalds 	/* Forward retransmissions are possible only during Recovery. */
18246687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
18251da177e4SLinus Torvalds 		return;
18261da177e4SLinus Torvalds 
18271da177e4SLinus Torvalds 	/* No forward retransmissions in Reno are possible. */
18281da177e4SLinus Torvalds 	if (!tp->rx_opt.sack_ok)
18291da177e4SLinus Torvalds 		return;
18301da177e4SLinus Torvalds 
18311da177e4SLinus Torvalds 	/* Yeah, we have to make a difficult choice between forward transmission
18321da177e4SLinus Torvalds 	 * and retransmission... Both ways have their merits...
18331da177e4SLinus Torvalds 	 *
18341da177e4SLinus Torvalds 	 * For now we do not retransmit anything, while we have some new
18351da177e4SLinus Torvalds 	 * segments to send.
18361da177e4SLinus Torvalds 	 */
18371da177e4SLinus Torvalds 
18381da177e4SLinus Torvalds 	if (tcp_may_send_now(sk, tp))
18391da177e4SLinus Torvalds 		return;
18401da177e4SLinus Torvalds 
18416a438bbeSStephen Hemminger 	if (tp->forward_skb_hint) {
18426a438bbeSStephen Hemminger 		skb = tp->forward_skb_hint;
18436a438bbeSStephen Hemminger 		packet_cnt = tp->forward_cnt_hint;
18446a438bbeSStephen Hemminger 	} else {
18456a438bbeSStephen Hemminger 		skb = sk->sk_write_queue.next;
18461da177e4SLinus Torvalds 		packet_cnt = 0;
18476a438bbeSStephen Hemminger 	}
18481da177e4SLinus Torvalds 
18496a438bbeSStephen Hemminger 	sk_stream_for_retrans_queue_from(skb, sk) {
18506a438bbeSStephen Hemminger 		tp->forward_cnt_hint = packet_cnt;
18516a438bbeSStephen Hemminger 		tp->forward_skb_hint = skb;
18526a438bbeSStephen Hemminger 
18531da177e4SLinus Torvalds 		/* Similar to the retransmit loop above we
18541da177e4SLinus Torvalds 		 * can pretend that the retransmitted SKB
18551da177e4SLinus Torvalds 		 * we send out here will be composed of one
18561da177e4SLinus Torvalds 		 * real MSS sized packet because tcp_retransmit_skb()
18571da177e4SLinus Torvalds 		 * will fragment it if necessary.
18581da177e4SLinus Torvalds 		 */
18591da177e4SLinus Torvalds 		if (++packet_cnt > tp->fackets_out)
18601da177e4SLinus Torvalds 			break;
18611da177e4SLinus Torvalds 
18621da177e4SLinus Torvalds 		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
18631da177e4SLinus Torvalds 			break;
18641da177e4SLinus Torvalds 
18651da177e4SLinus Torvalds 		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
18661da177e4SLinus Torvalds 			continue;
18671da177e4SLinus Torvalds 
18681da177e4SLinus Torvalds 		/* Ok, retransmit it. */
18696a438bbeSStephen Hemminger 		if (tcp_retransmit_skb(sk, skb)) {
18706a438bbeSStephen Hemminger 			tp->forward_skb_hint = NULL;
18711da177e4SLinus Torvalds 			break;
18726a438bbeSStephen Hemminger 		}
18731da177e4SLinus Torvalds 
18741da177e4SLinus Torvalds 		if (skb == skb_peek(&sk->sk_write_queue))
18753f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
18763f421baaSArnaldo Carvalho de Melo 						  inet_csk(sk)->icsk_rto,
18773f421baaSArnaldo Carvalho de Melo 						  TCP_RTO_MAX);
18781da177e4SLinus Torvalds 
18791da177e4SLinus Torvalds 		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
18801da177e4SLinus Torvalds 	}
18811da177e4SLinus Torvalds }
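/* Note on the hint fields used above: retransmit_skb_hint and
 * forward_skb_hint let successive invocations resume the queue walk
 * where the previous one stopped instead of rescanning from
 * sk_write_queue.next every time, which matters for long retransmit
 * queues; the hints are cleared whenever the queue is reshuffled
 * under us (see clear_all_retrans_hints() callers).
 */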
18821da177e4SLinus Torvalds 
18831da177e4SLinus Torvalds 
18841da177e4SLinus Torvalds /* Send a fin.  The caller locks the socket for us.  This cannot be
18851da177e4SLinus Torvalds  * allowed to fail queueing a FIN frame under any circumstances.
18861da177e4SLinus Torvalds  */
18871da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
18881da177e4SLinus Torvalds {
18891da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18901da177e4SLinus Torvalds 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
18911da177e4SLinus Torvalds 	int mss_now;
18921da177e4SLinus Torvalds 
18931da177e4SLinus Torvalds 	/* Optimization, tack on the FIN if we have a queue of
18941da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKS
18951da177e4SLinus Torvalds 	 * unsent frames.  But be careful about outgoing SACKs
18961da177e4SLinus Torvalds 	 */
18971da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, 1);
18981da177e4SLinus Torvalds 
18991da177e4SLinus Torvalds 	if (sk->sk_send_head != NULL) {
19001da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
19011da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq++;
19021da177e4SLinus Torvalds 		tp->write_seq++;
19031da177e4SLinus Torvalds 	} else {
19041da177e4SLinus Torvalds 		/* Socket is locked, keep trying until memory is available. */
19051da177e4SLinus Torvalds 		for (;;) {
1906d179cd12SDavid S. Miller 			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
19071da177e4SLinus Torvalds 			if (skb)
19081da177e4SLinus Torvalds 				break;
19091da177e4SLinus Torvalds 			yield();
19101da177e4SLinus Torvalds 		}
19111da177e4SLinus Torvalds 
19121da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
19131da177e4SLinus Torvalds 		skb_reserve(skb, MAX_TCP_HEADER);
19141da177e4SLinus Torvalds 		skb->csum = 0;
19151da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
19161da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked = 0;
19171da177e4SLinus Torvalds 		skb_shinfo(skb)->tso_segs = 1;
19181da177e4SLinus Torvalds 		skb_shinfo(skb)->tso_size = 0;
19191da177e4SLinus Torvalds 
19201da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
19211da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->seq = tp->write_seq;
19221da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
19231da177e4SLinus Torvalds 		tcp_queue_skb(sk, skb);
19241da177e4SLinus Torvalds 	}
19251da177e4SLinus Torvalds 	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
19261da177e4SLinus Torvalds }
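/* Sequence example (illustrative): with write_seq == 5000, a FIN-only
 * skb carries seq == 5000 and end_seq == 5001; the FIN consumes one
 * sequence number even though no payload byte is sent, which is why
 * write_seq advances by one in both branches above.
 */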
19271da177e4SLinus Torvalds 
19281da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to
19291da177e4SLinus Torvalds  * an explicit close() or as a byproduct of exit()'ing) and there
19301da177e4SLinus Torvalds  * was unread data in the receive queue.  This behavior is recommended
19311da177e4SLinus Torvalds  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
19321da177e4SLinus Torvalds  */
1933dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority)
19341da177e4SLinus Torvalds {
19351da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
19361da177e4SLinus Torvalds 	struct sk_buff *skb;
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds 	/* NOTE: No TCP options attached and we never retransmit this. */
19391da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, priority);
19401da177e4SLinus Torvalds 	if (!skb) {
19411da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
19421da177e4SLinus Torvalds 		return;
19431da177e4SLinus Torvalds 	}
19441da177e4SLinus Torvalds 
19451da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
19461da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
19471da177e4SLinus Torvalds 	skb->csum = 0;
19481da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
19491da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
19501da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
19511da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
19521da177e4SLinus Torvalds 
19531da177e4SLinus Torvalds 	/* Send it off. */
19541da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
19551da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
19561da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1957dfb4b9dcSDavid S. Miller 	if (tcp_transmit_skb(sk, skb, 0, priority))
19581da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
19591da177e4SLinus Torvalds }
19601da177e4SLinus Torvalds 
19611da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent
19621da177e4SLinus Torvalds  * a SYN packet that crossed the incoming SYN that caused this routine
19631da177e4SLinus Torvalds  * to get called. If this assumption fails then the initial rcv_wnd
19641da177e4SLinus Torvalds  * and rcv_wscale values will not be correct.
19651da177e4SLinus Torvalds  */
19661da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk)
19671da177e4SLinus Torvalds {
19681da177e4SLinus Torvalds 	struct sk_buff* skb;
19691da177e4SLinus Torvalds 
19701da177e4SLinus Torvalds 	skb = skb_peek(&sk->sk_write_queue);
19711da177e4SLinus Torvalds 	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
19721da177e4SLinus Torvalds 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
19731da177e4SLinus Torvalds 		return -EFAULT;
19741da177e4SLinus Torvalds 	}
19751da177e4SLinus Torvalds 	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
19761da177e4SLinus Torvalds 		if (skb_cloned(skb)) {
19771da177e4SLinus Torvalds 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
19781da177e4SLinus Torvalds 			if (nskb == NULL)
19791da177e4SLinus Torvalds 				return -ENOMEM;
19801da177e4SLinus Torvalds 			__skb_unlink(skb, &sk->sk_write_queue);
19811da177e4SLinus Torvalds 			skb_header_release(nskb);
19821da177e4SLinus Torvalds 			__skb_queue_head(&sk->sk_write_queue, nskb);
19831da177e4SLinus Torvalds 			sk_stream_free_skb(sk, skb);
19841da177e4SLinus Torvalds 			sk_charge_skb(sk, nskb);
19851da177e4SLinus Torvalds 			skb = nskb;
19861da177e4SLinus Torvalds 		}
19871da177e4SLinus Torvalds 
19881da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
19891da177e4SLinus Torvalds 		TCP_ECN_send_synack(tcp_sk(sk), skb);
19901da177e4SLinus Torvalds 	}
19911da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1992dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
19931da177e4SLinus Torvalds }
19941da177e4SLinus Torvalds 
19951da177e4SLinus Torvalds /*
19961da177e4SLinus Torvalds  * Prepare a SYN-ACK.
19971da177e4SLinus Torvalds  */
19981da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
199960236fddSArnaldo Carvalho de Melo 				 struct request_sock *req)
20001da177e4SLinus Torvalds {
20012e6599cbSArnaldo Carvalho de Melo 	struct inet_request_sock *ireq = inet_rsk(req);
20021da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20031da177e4SLinus Torvalds 	struct tcphdr *th;
20041da177e4SLinus Torvalds 	int tcp_header_size;
20051da177e4SLinus Torvalds 	struct sk_buff *skb;
20061da177e4SLinus Torvalds 
20071da177e4SLinus Torvalds 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
20081da177e4SLinus Torvalds 	if (skb == NULL)
20091da177e4SLinus Torvalds 		return NULL;
20101da177e4SLinus Torvalds 
20111da177e4SLinus Torvalds 	/* Reserve space for headers. */
20121da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
20131da177e4SLinus Torvalds 
20141da177e4SLinus Torvalds 	skb->dst = dst_clone(dst);
20151da177e4SLinus Torvalds 
20161da177e4SLinus Torvalds 	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
20172e6599cbSArnaldo Carvalho de Melo 			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
20182e6599cbSArnaldo Carvalho de Melo 			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
20191da177e4SLinus Torvalds 			   /* SACK_PERM is in the place of NOP NOP of TS */
20202e6599cbSArnaldo Carvalho de Melo 			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
20211da177e4SLinus Torvalds 	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
20221da177e4SLinus Torvalds 
20231da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
20241da177e4SLinus Torvalds 	th->syn = 1;
20251da177e4SLinus Torvalds 	th->ack = 1;
20261da177e4SLinus Torvalds 	if (dst->dev->features&NETIF_F_TSO)
20272e6599cbSArnaldo Carvalho de Melo 		ireq->ecn_ok = 0;
20281da177e4SLinus Torvalds 	TCP_ECN_make_synack(req, th);
20291da177e4SLinus Torvalds 	th->source = inet_sk(sk)->sport;
20302e6599cbSArnaldo Carvalho de Melo 	th->dest = ireq->rmt_port;
20312e6599cbSArnaldo Carvalho de Melo 	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
20321da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
20331da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
20341da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
20351da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
20361da177e4SLinus Torvalds 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
20372e6599cbSArnaldo Carvalho de Melo 	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
20381da177e4SLinus Torvalds 	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
20391da177e4SLinus Torvalds 		__u8 rcv_wscale;
20401da177e4SLinus Torvalds 		/* Set this up on the first call only */
20411da177e4SLinus Torvalds 		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
20421da177e4SLinus Torvalds 		/* Use tcp_full_space() because this is guaranteed to be the first packet */
20431da177e4SLinus Torvalds 		tcp_select_initial_window(tcp_full_space(sk),
20442e6599cbSArnaldo Carvalho de Melo 			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
20451da177e4SLinus Torvalds 			&req->rcv_wnd,
20461da177e4SLinus Torvalds 			&req->window_clamp,
20472e6599cbSArnaldo Carvalho de Melo 			ireq->wscale_ok,
20481da177e4SLinus Torvalds 			&rcv_wscale);
20492e6599cbSArnaldo Carvalho de Melo 		ireq->rcv_wscale = rcv_wscale;
20501da177e4SLinus Torvalds 	}
20511da177e4SLinus Torvalds 
20521da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
20531da177e4SLinus Torvalds 	th->window = htons(req->rcv_wnd);
20541da177e4SLinus Torvalds 
20551da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
20562e6599cbSArnaldo Carvalho de Melo 	tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
20572e6599cbSArnaldo Carvalho de Melo 			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
20581da177e4SLinus Torvalds 			      TCP_SKB_CB(skb)->when,
20591da177e4SLinus Torvalds 			      req->ts_recent);
20601da177e4SLinus Torvalds 
20611da177e4SLinus Torvalds 	skb->csum = 0;
20621da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
20631da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_OUTSEGS);
20641da177e4SLinus Torvalds 	return skb;
20651da177e4SLinus Torvalds }
20661da177e4SLinus Torvalds 
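/* Worked example (editor's note): with the TCPOLEN_* constants from
 * <net/tcp.h> (MSS = 4, TSTAMP_ALIGNED = 12, WSCALE_ALIGNED = 4,
 * SACKPERM_ALIGNED = 4), a SYN-ACK answering a SYN that offered both
 * timestamps and window scaling gets
 *
 *	tcp_header_size = 20 + 4 + 12 + 4 = 40 bytes, i.e. th->doff = 10.
 *
 * SACK-permitted adds nothing in that case because it rides in the two
 * NOP bytes of the timestamp option, as the comment above notes.
 */
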
20671da177e4SLinus Torvalds /*
20681da177e4SLinus Torvalds  * Do all connect socket setups that can be done AF independent.
20691da177e4SLinus Torvalds  */
207040efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk)
20711da177e4SLinus Torvalds {
20721da177e4SLinus Torvalds 	struct dst_entry *dst = __sk_dst_get(sk);
20731da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20741da177e4SLinus Torvalds 	__u8 rcv_wscale;
20751da177e4SLinus Torvalds 
20761da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
20771da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
20781da177e4SLinus Torvalds 	 */
20791da177e4SLinus Torvalds 	tp->tcp_header_len = sizeof(struct tcphdr) +
20801da177e4SLinus Torvalds 		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
20811da177e4SLinus Torvalds 
20821da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it as the MSS clamp */
20831da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
20841da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
20851da177e4SLinus Torvalds 	tp->max_window = 0;
20865d424d5aSJohn Heffner 	tcp_mtup_init(sk);
20871da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds 	if (!tp->window_clamp)
20901da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
20911da177e4SLinus Torvalds 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
20921da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
20931da177e4SLinus Torvalds 
20941da177e4SLinus Torvalds 	tcp_select_initial_window(tcp_full_space(sk),
20951da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
20961da177e4SLinus Torvalds 				  &tp->rcv_wnd,
20971da177e4SLinus Torvalds 				  &tp->window_clamp,
20981da177e4SLinus Torvalds 				  sysctl_tcp_window_scaling,
20991da177e4SLinus Torvalds 				  &rcv_wscale);
21001da177e4SLinus Torvalds 
21011da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
21021da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
21031da177e4SLinus Torvalds 
21041da177e4SLinus Torvalds 	sk->sk_err = 0;
21051da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
21061da177e4SLinus Torvalds 	tp->snd_wnd = 0;
21071da177e4SLinus Torvalds 	tcp_init_wl(tp, tp->write_seq, 0);
21081da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
21091da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
21101da177e4SLinus Torvalds 	tp->rcv_nxt = 0;
21111da177e4SLinus Torvalds 	tp->rcv_wup = 0;
21121da177e4SLinus Torvalds 	tp->copied_seq = 0;
21131da177e4SLinus Torvalds 
2114463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2115463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
21161da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
21171da177e4SLinus Torvalds }
21181da177e4SLinus Torvalds 
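/* Illustration (editor's sketch, not part of the original file): the
 * user_mss consulted by tcp_connect_init() is what TCP_MAXSEG sets
 * from user space when applied before connect().  A minimal fragment
 * (the helper name is illustrative):
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int clamp_mss(int fd, int mss)
{
	/* Recorded as tp->rx_opt.user_mss and used as mss_clamp above. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
}
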
21191da177e4SLinus Torvalds /*
21201da177e4SLinus Torvalds  * Build a SYN and send it off.
21211da177e4SLinus Torvalds  */
21221da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
21231da177e4SLinus Torvalds {
21241da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21251da177e4SLinus Torvalds 	struct sk_buff *buff;
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds 	tcp_connect_init(sk);
21281da177e4SLinus Torvalds 
2129d179cd12SDavid S. Miller 	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
21301da177e4SLinus Torvalds 	if (unlikely(buff == NULL))
21311da177e4SLinus Torvalds 		return -ENOBUFS;
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 	/* Reserve space for headers. */
21341da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
21371da177e4SLinus Torvalds 	TCP_ECN_send_syn(sk, tp, buff);
21381da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->sacked = 0;
21391da177e4SLinus Torvalds 	skb_shinfo(buff)->tso_segs = 1;
21401da177e4SLinus Torvalds 	skb_shinfo(buff)->tso_size = 0;
21411da177e4SLinus Torvalds 	buff->csum = 0;
21421da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->seq = tp->write_seq++;
21431da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
21441da177e4SLinus Torvalds 	tp->snd_nxt = tp->write_seq;
21451da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
21461da177e4SLinus Torvalds 
21471da177e4SLinus Torvalds 	/* Send it off. */
21481da177e4SLinus Torvalds 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
21491da177e4SLinus Torvalds 	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
21501da177e4SLinus Torvalds 	skb_header_release(buff);
21511da177e4SLinus Torvalds 	__skb_queue_tail(&sk->sk_write_queue, buff);
21521da177e4SLinus Torvalds 	sk_charge_skb(sk, buff);
21531da177e4SLinus Torvalds 	tp->packets_out += tcp_skb_pcount(buff);
2154dfb4b9dcSDavid S. Miller 	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
21551da177e4SLinus Torvalds 	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
21583f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
21593f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
21601da177e4SLinus Torvalds 	return 0;
21611da177e4SLinus Torvalds }
21621da177e4SLinus Torvalds 
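/* Illustration (editor's sketch, not part of the original file): from
 * user space, tcp_connect() runs under connect(2).  On a non-blocking
 * socket the call returns once the SYN built above is queued, and the
 * retransmit timer armed at the end of tcp_connect() repeats the SYN
 * until an answer arrives or the retry limit is exhausted.
 */
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

static int start_connect(int fd, const struct sockaddr *sa, socklen_t len)
{
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
	if (connect(fd, sa, len) < 0 && errno != EINPROGRESS)
		return -1;	/* immediate, hard failure */
	return 0;		/* SYN is out; poll for writability */
}
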
21631da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
21641da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
21651da177e4SLinus Torvalds  * for details.
21661da177e4SLinus Torvalds  */
21671da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
21681da177e4SLinus Torvalds {
2169463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
2170463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
21711da177e4SLinus Torvalds 	unsigned long timeout;
21721da177e4SLinus Torvalds 
21731da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
2174463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
21751da177e4SLinus Torvalds 		int max_ato = HZ/2;
21761da177e4SLinus Torvalds 
2177463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
21781da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
21811da177e4SLinus Torvalds 
21821da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
2183463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
21841da177e4SLinus Torvalds 		 * directly.
21851da177e4SLinus Torvalds 		 */
21861da177e4SLinus Torvalds 		if (tp->srtt) {
21871da177e4SLinus Torvalds 			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
21881da177e4SLinus Torvalds 
21891da177e4SLinus Torvalds 			if (rtt < max_ato)
21901da177e4SLinus Torvalds 				max_ato = rtt;
21911da177e4SLinus Torvalds 		}
21921da177e4SLinus Torvalds 
21931da177e4SLinus Torvalds 		ato = min(ato, max_ato);
21941da177e4SLinus Torvalds 	}
21951da177e4SLinus Torvalds 
21961da177e4SLinus Torvalds 	/* Stay within the limit we were given */
21971da177e4SLinus Torvalds 	timeout = jiffies + ato;
21981da177e4SLinus Torvalds 
21991da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one pending. */
2200463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
22011da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
22021da177e4SLinus Torvalds 		 * send ACK now.
22031da177e4SLinus Torvalds 		 */
2204463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
2205463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
22061da177e4SLinus Torvalds 			tcp_send_ack(sk);
22071da177e4SLinus Torvalds 			return;
22081da177e4SLinus Torvalds 		}
22091da177e4SLinus Torvalds 
2210463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
2211463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
22121da177e4SLinus Torvalds 	}
2213463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2214463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
2215463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
22161da177e4SLinus Torvalds }
22171da177e4SLinus Torvalds 
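/* Illustration (editor's sketch, not part of the original file): the
 * pingpong flag tested above is what TCP_QUICKACK toggles from user
 * space -- setting it makes pending ACKs go out at once instead of
 * being delayed.  The effect is not permanent (the kernel can re-enter
 * pingpong mode later), so applications re-arm it as needed:
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void quickack(int fd)
{
	int one = 1;

	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}
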
22181da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
22191da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
22201da177e4SLinus Torvalds {
22211da177e4SLinus Torvalds 	/* If we have been reset, we may not send again. */
22221da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
22231da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
22241da177e4SLinus Torvalds 		struct sk_buff *buff;
22251da177e4SLinus Torvalds 
22261da177e4SLinus Torvalds 		/* We are not putting this on the write queue, so
22271da177e4SLinus Torvalds 		 * tcp_transmit_skb() will set the ownership to this
22281da177e4SLinus Torvalds 		 * sock.
22291da177e4SLinus Torvalds 		 */
22301da177e4SLinus Torvalds 		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
22311da177e4SLinus Torvalds 		if (buff == NULL) {
2232463c84b9SArnaldo Carvalho de Melo 			inet_csk_schedule_ack(sk);
2233463c84b9SArnaldo Carvalho de Melo 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
22343f421baaSArnaldo Carvalho de Melo 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
22353f421baaSArnaldo Carvalho de Melo 						  TCP_DELACK_MAX, TCP_RTO_MAX);
22361da177e4SLinus Torvalds 			return;
22371da177e4SLinus Torvalds 		}
22381da177e4SLinus Torvalds 
22391da177e4SLinus Torvalds 		/* Reserve space for headers and prepare control bits. */
22401da177e4SLinus Torvalds 		skb_reserve(buff, MAX_TCP_HEADER);
22411da177e4SLinus Torvalds 		buff->csum = 0;
22421da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
22431da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->sacked = 0;
22441da177e4SLinus Torvalds 		skb_shinfo(buff)->tso_segs = 1;
22451da177e4SLinus Torvalds 		skb_shinfo(buff)->tso_size = 0;
22461da177e4SLinus Torvalds 
22471da177e4SLinus Torvalds 		/* Send it off, this clears delayed acks for us. */
22481da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
22491da177e4SLinus Torvalds 		TCP_SKB_CB(buff)->when = tcp_time_stamp;
2250dfb4b9dcSDavid S. Miller 		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
22511da177e4SLinus Torvalds 	}
22521da177e4SLinus Torvalds }
22531da177e4SLinus Torvalds 
22541da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
22551da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
22561da177e4SLinus Torvalds  *
22571da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
22581da177e4SLinus Torvalds  * 4.4BSD forces sending a single byte of data. We cannot send
22591da177e4SLinus Torvalds  * out-of-window data, because we have SND.NXT==SND.MAX...
22601da177e4SLinus Torvalds  *
22611da177e4SLinus Torvalds  * Current solution: send TWO zero-length segments in urgent mode:
22621da177e4SLinus Torvalds  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
22631da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe the window.
22641da177e4SLinus Torvalds  */
22651da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
22661da177e4SLinus Torvalds {
22671da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22681da177e4SLinus Torvalds 	struct sk_buff *skb;
22691da177e4SLinus Torvalds 
22701da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
22711da177e4SLinus Torvalds 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
22721da177e4SLinus Torvalds 	if (skb == NULL)
22731da177e4SLinus Torvalds 		return -1;
22741da177e4SLinus Torvalds 
22751da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
22761da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
22771da177e4SLinus Torvalds 	skb->csum = 0;
22781da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
22791da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = urgent;
22801da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_segs = 1;
22811da177e4SLinus Torvalds 	skb_shinfo(skb)->tso_size = 0;
22821da177e4SLinus Torvalds 
22831da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
22841da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
22851da177e4SLinus Torvalds 	 * send it.
22861da177e4SLinus Torvalds 	 */
22871da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
22881da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
22891da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2290dfb4b9dcSDavid S. Miller 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
22911da177e4SLinus Torvalds }
22921da177e4SLinus Torvalds 
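/* Worked example (editor's note): suppose SND.UNA = 1000.  The probe
 * built above carries SEQ = 999 (or SND.UNA itself in the urgent case)
 * and no data, so it lies entirely outside the receive window.  The
 * peer discards it but, per RFC 793, answers with an ACK advertising
 * its current window -- exactly the information a window probe needs.
 */
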
22931da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
22941da177e4SLinus Torvalds {
22951da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
22961da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
22971da177e4SLinus Torvalds 		struct sk_buff *skb;
22981da177e4SLinus Torvalds 
22991da177e4SLinus Torvalds 		if ((skb = sk->sk_send_head) != NULL &&
23001da177e4SLinus Torvalds 		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
23011da177e4SLinus Torvalds 			int err;
23021da177e4SLinus Torvalds 			unsigned int mss = tcp_current_mss(sk, 0);
23031da177e4SLinus Torvalds 			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
23041da177e4SLinus Torvalds 
23051da177e4SLinus Torvalds 			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
23061da177e4SLinus Torvalds 				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
23071da177e4SLinus Torvalds 
23081da177e4SLinus Torvalds 			/* We are probing the opening of a window
23091da177e4SLinus Torvalds 			 * but the window size is != 0; this must have
23101da177e4SLinus Torvalds 			 * been the result of sender-side SWS avoidance.
23111da177e4SLinus Torvalds 			 */
23121da177e4SLinus Torvalds 			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
23131da177e4SLinus Torvalds 			    skb->len > mss) {
23141da177e4SLinus Torvalds 				seg_size = min(seg_size, mss);
23151da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2316846998aeSDavid S. Miller 				if (tcp_fragment(sk, skb, seg_size, mss))
23171da177e4SLinus Torvalds 					return -1;
23181da177e4SLinus Torvalds 			} else if (!tcp_skb_pcount(skb))
2319846998aeSDavid S. Miller 				tcp_set_skb_tso_segs(sk, skb, mss);
23201da177e4SLinus Torvalds 
23211da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
23221da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->when = tcp_time_stamp;
2323dfb4b9dcSDavid S. Miller 			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
23241da177e4SLinus Torvalds 			if (!err) {
23251da177e4SLinus Torvalds 				update_send_head(sk, tp, skb);
23261da177e4SLinus Torvalds 			}
23271da177e4SLinus Torvalds 			return err;
23281da177e4SLinus Torvalds 		} else {
23291da177e4SLinus Torvalds 			if (tp->urg_mode &&
23301da177e4SLinus Torvalds 			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
23311da177e4SLinus Torvalds 				tcp_xmit_probe_skb(sk, TCPCB_URG);
23321da177e4SLinus Torvalds 			return tcp_xmit_probe_skb(sk, 0);
23331da177e4SLinus Torvalds 		}
23341da177e4SLinus Torvalds 	}
23351da177e4SLinus Torvalds 	return -1;
23361da177e4SLinus Torvalds }
23371da177e4SLinus Torvalds 
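/* Worked example (editor's note): say SND.UNA = 1000, the peer offers
 * snd_wnd = 500, and the head skb spans 1000..2000 with an MSS of 1460.
 * Then seg_size = 1000 + 500 - 1000 = 500, which is smaller than the
 * skb, so tcp_fragment() above splits off the first 500 bytes and only
 * that piece is pushed into the partially opened window.
 */
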
23381da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed,
23391da177e4SLinus Torvalds  * send a partial packet, else send a zero-window probe.
23401da177e4SLinus Torvalds  */
23411da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
23421da177e4SLinus Torvalds {
2343463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
23441da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
23451da177e4SLinus Torvalds 	int err;
23461da177e4SLinus Torvalds 
23471da177e4SLinus Torvalds 	err = tcp_write_wakeup(sk);
23481da177e4SLinus Torvalds 
23491da177e4SLinus Torvalds 	if (tp->packets_out || !sk->sk_send_head) {
23501da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required. */
23516687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
2352463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
23531da177e4SLinus Torvalds 		return;
23541da177e4SLinus Torvalds 	}
23551da177e4SLinus Torvalds 
23561da177e4SLinus Torvalds 	if (err <= 0) {
2357463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2358463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
23596687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out++;
2360463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
23613f421baaSArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
23623f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
23631da177e4SLinus Torvalds 	} else {
23641da177e4SLinus Torvalds 		/* If packet was not sent due to local congestion,
23656687e988SArnaldo Carvalho de Melo 		 * do not back off and do not remember icsk_probes_out.
23661da177e4SLinus Torvalds 		 * Let local senders fight for local resources.
23671da177e4SLinus Torvalds 		 *
23681da177e4SLinus Torvalds 		 * Still use the accumulated backoff, though.
23691da177e4SLinus Torvalds 		 */
23706687e988SArnaldo Carvalho de Melo 		if (!icsk->icsk_probes_out)
23716687e988SArnaldo Carvalho de Melo 			icsk->icsk_probes_out = 1;
2372463c84b9SArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2373463c84b9SArnaldo Carvalho de Melo 					  min(icsk->icsk_rto << icsk->icsk_backoff,
23743f421baaSArnaldo Carvalho de Melo 					      TCP_RESOURCE_PROBE_INTERVAL),
23753f421baaSArnaldo Carvalho de Melo 					  TCP_RTO_MAX);
23761da177e4SLinus Torvalds 	}
23771da177e4SLinus Torvalds }
23781da177e4SLinus Torvalds 
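/* Worked example (editor's note): assuming icsk_rto still holds the
 * initial TCP_TIMEOUT_INIT of 3 s, the probe timer above backs off as
 * min(icsk_rto << icsk_backoff, TCP_RTO_MAX), i.e. 3 s, 6 s, 12 s,
 * 24 s, 48 s, 96 s, then a steady 120 s once the TCP_RTO_MAX clamp
 * kicks in, until the window opens or the connection is torn down.
 */
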
23791da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
23801da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
23811da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
23821da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2383f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
23845d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
2385