/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

static inline void update_send_head(struct sock *sk, struct tcp_sock *tp,
				    struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una + tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una + tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise MSS, calculated from the first
 *    hop device mtu, but allow raising it to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
 * This is the first part of the cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
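
/* Worked example for the restart heuristic above (illustrative numbers
 * only, not taken from the code or a trace): with icsk_rto worth of
 * 200ms and a connection idle for 700ms, delta allows three halvings,
 * so a cwnd of 40 becomes 5; it is then raised back up to restart_cwnd
 * if that is larger, so the connection restarts near its initial
 * window rather than at its old, now stale, cwnd.
 */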

static inline void tcp_event_data_sent(struct tcp_sock *tp,
				       struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value large enough for senders
	 * following RFC2414. Senders not following this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
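
/* Worked example for the initial window clamp above (illustrative
 * values only): with mss == 1460 the default init_cwnd stays at 4, so
 * even if the quantized buffer space would allow more, the rcv_wnd
 * offered in the SYN is limited to 4 * 1460 == 5840 bytes.  A larger
 * mss (> 1460) drops init_cwnd to 3, and mss > 4380 drops it to 2,
 * keeping the initial burst roughly constant in bytes.
 */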

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static __inline__ u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
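
/* Example of the scaling applied above (illustrative values): if
 * __tcp_select_window() returns 233600 and rcv_wscale is 7, the value
 * placed in th->window is 233600 >> 7 == 1825, and the peer
 * reconstructs 1825 << 7 == 233600.  Without window scaling the
 * advertisement is instead clamped to MAX_TCP_WINDOW.
 */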


/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (skb != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct inet_sock *inet = inet_sk(sk);
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
		int tcp_header_size = tp->tcp_header_len;
		struct tcphdr *th;
		int sysctl_flags;
		int err;

		BUG_ON(!tcp_skb_pcount(skb));

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

		/* If congestion control is doing timestamping */
		if (icsk->icsk_ca_ops->rtt_sample)
			__net_timestamp(skb);

		sysctl_flags = 0;
		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
			if (sysctl_tcp_timestamps) {
				tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
			}
			if (sysctl_tcp_window_scaling) {
				tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_WSCALE;
			}
			if (sysctl_tcp_sack) {
				sysctl_flags |= SYSCTL_FLAG_SACK;
				if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
					tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
			}
		} else if (tp->rx_opt.eff_sacks) {
			/* A SACK is 2 pad bytes, a 2 byte header, plus
			 * 2 32-bit sequence numbers for each SACK block.
			 */
			tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
					    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
		}

		if (tcp_packets_in_flight(tp) == 0)
			tcp_ca_event(sk, CA_EVENT_TX_START);

		th = (struct tcphdr *) skb_push(skb, tcp_header_size);
		skb->h.th = th;
		skb_set_owner_w(skb, sk);

		/* Build TCP header and checksum it. */
		th->source = inet->sport;
		th->dest = inet->dport;
		th->seq = htonl(tcb->seq);
		th->ack_seq = htonl(tp->rcv_nxt);
		*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags);
		if (tcb->flags & TCPCB_FLAG_SYN) {
			/* RFC1323: The window in SYN & SYN/ACK segments
			 * is never scaled.
			 */
			th->window = htons(tp->rcv_wnd);
		} else {
			th->window = htons(tcp_select_window(sk));
		}
		th->check = 0;
		th->urg_ptr = 0;

		if (tp->urg_mode &&
		    between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF)) {
			th->urg_ptr = htons(tp->snd_up-tcb->seq);
			th->urg = 1;
		}

		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_syn_build_options((__u32 *)(th + 1),
					      tcp_advertise_mss(sk),
					      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
					      (sysctl_flags & SYSCTL_FLAG_SACK),
					      (sysctl_flags & SYSCTL_FLAG_WSCALE),
					      tp->rx_opt.rcv_wscale,
					      tcb->when,
					      tp->rx_opt.ts_recent);
		} else {
			tcp_build_and_update_options((__u32 *)(th + 1),
						     tp, tcb->when);

			TCP_ECN_send(sk, tp, skb, tcp_header_size);
		}
		tp->af_specific->send_check(sk, th, skb->len, skb);

		if (tcb->flags & TCPCB_FLAG_ACK)
			tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

		if (skb->len != tcp_header_size)
			tcp_event_data_sent(tp, skb, sk);

		TCP_INC_STATS(TCP_MIB_OUTSEGS);

		err = tp->af_specific->queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		tcp_enter_cwr(sk);

		/* NET_XMIT_CN is special. It does not guarantee
		 * that this packet is lost. It tells us that the device
		 * is about to start dropping packets, or already drops
		 * some packets of the same priority, and invokes us
		 * to send less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now ||
	    !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}
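
/* Example of the factor computed above (illustrative numbers): a
 * 4000 byte skb with mss_now == 1448 gives (4000 + 1447) / 1448 == 3,
 * so tso_segs is 3 and tso_size is 1448; the hardware will emit two
 * full segments and one 1104 byte tail.
 */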

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	u16 flags;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */
	sk_charge_skb(sk, buff);

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked =
		(TCP_SKB_CB(skb)->sacked &
		 (TCPCB_LOST | TCPCB_EVER_RETRANS | TCPCB_AT_TAIL));
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the "when" of
	 * skbs which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
		tp->lost_out -= tcp_skb_pcount(skb);
		tp->left_out -= tcp_skb_pcount(skb);
	}

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (after(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}
		if (diff > 0) {
			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that the pulled data is not copied,
 * but immediately discarded.
 */
static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
	return skb->tail;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	if (len <= skb_headlen(skb)) {
		__skb_pull(skb, len);
	} else {
		if (__pskb_trim_head(skb, len - skb_headlen(skb)) == NULL)
			return -ENOMEM;
	}

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* This function synchronizes snd mss to the current pmtu/exthdr set.

   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
   account for TCP options, but includes only the bare TCP header.

   tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
   It is the minimum of user_mss and the mss received with SYN.
   It also does not include TCP options.

   tp->pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is the current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account the current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. tp->pmtu_cookie and tp->mss_cache are READ ONLY outside
   this function.			--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= tp->ext_header_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	tp->pmtu_cookie = pmtu;
	tp->mss_cache = mss_now;

	return mss_now;
}
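
/* Worked example for the arithmetic above (illustrative values): for
 * IPv4 with pmtu == 1500, mss_now starts at 1500 - 20 - 20 == 1460;
 * with no extension headers and timestamps enabled
 * (tcp_header_len == 20 + 12) the final mss_cache is 1460 - 12 == 1448.
 */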

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed &&
	    (sk->sk_route_caps & NETIF_F_TSO) &&
	    !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != tp->pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = 65535 -
			tp->af_specific->net_header_len -
			tp->ext_header_len - tp->tcp_header_len;

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     skb_shinfo(skb)->tso_size != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if the packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}
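
/* Example of the test above (illustrative situation): with TCP_CORK
 * and TCP_NODELAY both clear, a 100 byte segment (less than mss_now)
 * is held back while an earlier small segment is still unacknowledged
 * (tcp_minshall_check() true); once that ACK arrives, or if the
 * segment grows to a full mss, it may be sent.
 */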

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

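/* Summary of the send test used below: a segment may be transmitted
 * only if the Nagle test passes, the congestion window has room
 * (tcp_cwnd_test() non-zero) and at least the first mss of the segment
 * fits in the receiver's advertised window (tcp_snd_wnd_test()).
 */
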
int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	buff->truesize = nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer.  */
	return 1;
}
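
/* Example of the deferral bound above (illustrative numbers): with
 * mss_cache == 1448, snd_cwnd == 20, in_flight == 10 and a 64240 byte
 * send window, cong_win is 10 * 1448 == 14480 and limit is 14480.
 * With the default sysctl_tcp_tso_win_divisor of 3, chunk is
 * min(64240, 20 * 1448) / 3 == 9653, so limit >= chunk and the frame
 * is sent immediately rather than deferred.
 */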

/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;
	while ((skb = sk->sk_send_head)) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, tp, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, tp, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk, tp);
		return 0;
	}
	return !tp->packets_out && sk->sk_send_head;
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
1050a762a980SDavid S. Miller */ 1051a762a980SDavid S. Miller void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, 1052a2e2a59cSDavid S. Miller unsigned int cur_mss, int nonagle) 1053a762a980SDavid S. Miller { 1054a762a980SDavid S. Miller struct sk_buff *skb = sk->sk_send_head; 1055a762a980SDavid S. Miller 1056a762a980SDavid S. Miller if (skb) { 105755c97f3eSDavid S. Miller if (tcp_write_xmit(sk, cur_mss, nonagle)) 1058a762a980SDavid S. Miller tcp_check_probe_timer(sk, tp); 1059a762a980SDavid S. Miller } 1060a762a980SDavid S. Miller } 1061a762a980SDavid S. Miller 1062c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 1063c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 1064c1b4a7e6SDavid S. Miller */ 1065c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 1066c1b4a7e6SDavid S. Miller { 1067c1b4a7e6SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk); 1068c1b4a7e6SDavid S. Miller struct sk_buff *skb = sk->sk_send_head; 1069c1b4a7e6SDavid S. Miller unsigned int tso_segs, cwnd_quota; 1070c1b4a7e6SDavid S. Miller 1071c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 1072c1b4a7e6SDavid S. Miller 1073846998aeSDavid S. Miller tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1074c1b4a7e6SDavid S. Miller cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH); 1075c1b4a7e6SDavid S. Miller 1076c1b4a7e6SDavid S. Miller if (likely(cwnd_quota)) { 1077c8ac3774SHerbert Xu unsigned int limit; 1078c8ac3774SHerbert Xu 1079c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1080c1b4a7e6SDavid S. Miller 1081c8ac3774SHerbert Xu limit = mss_now; 1082c1b4a7e6SDavid S. Miller if (tso_segs > 1) { 1083c8ac3774SHerbert Xu limit = tcp_window_allows(tp, skb, 1084c1b4a7e6SDavid S. Miller mss_now, cwnd_quota); 1085c1b4a7e6SDavid S. Miller 1086c1b4a7e6SDavid S. Miller if (skb->len < limit) { 1087c1b4a7e6SDavid S. Miller unsigned int trim = skb->len % mss_now; 1088c1b4a7e6SDavid S. Miller 1089c1b4a7e6SDavid S. Miller if (trim) 1090c1b4a7e6SDavid S. Miller limit = skb->len - trim; 1091c1b4a7e6SDavid S. Miller } 1092c1b4a7e6SDavid S. Miller } 1093c8ac3774SHerbert Xu 1094c8ac3774SHerbert Xu if (skb->len > limit && 1095c8ac3774SHerbert Xu unlikely(tso_fragment(sk, skb, limit, mss_now))) 1096c1b4a7e6SDavid S. Miller return; 1097c1b4a7e6SDavid S. Miller 1098c1b4a7e6SDavid S. Miller /* Send it out now. */ 1099c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->when = tcp_time_stamp; 1100c1b4a7e6SDavid S. Miller 1101c1b4a7e6SDavid S. Miller if (likely(!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation)))) { 1102c1b4a7e6SDavid S. Miller update_send_head(sk, tp, skb); 1103c1b4a7e6SDavid S. Miller tcp_cwnd_validate(sk, tp); 1104c1b4a7e6SDavid S. Miller return; 1105c1b4a7e6SDavid S. Miller } 1106c1b4a7e6SDavid S. Miller } 1107c1b4a7e6SDavid S. Miller } 1108c1b4a7e6SDavid S. Miller 11091da177e4SLinus Torvalds /* This function returns the amount that we can raise the 11101da177e4SLinus Torvalds * usable window based on the following constraints 11111da177e4SLinus Torvalds * 11121da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 11131da177e4SLinus Torvalds * 2. 
We limit memory per socket 11141da177e4SLinus Torvalds * 11151da177e4SLinus Torvalds * RFC 1122: 11161da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 11171da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 11181da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 11191da177e4SLinus Torvalds * 11201da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 11211da177e4SLinus Torvalds * it at least MSS bytes. 11221da177e4SLinus Torvalds * 11231da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 11241da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 11251da177e4SLinus Torvalds * 11261da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 11271da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 11281da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 11291da177e4SLinus Torvalds * window to always advance by a single byte. 11301da177e4SLinus Torvalds * 11311da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 11321da177e4SLinus Torvalds * then this will not be a problem. 11331da177e4SLinus Torvalds * 11341da177e4SLinus Torvalds * BSD seems to make the following compromise: 11351da177e4SLinus Torvalds * 11361da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 11371da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 11381da177e4SLinus Torvalds * then set the window to 0. 11391da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 11401da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 11411da177e4SLinus Torvalds * and from being larger than the largest representable value. 11421da177e4SLinus Torvalds * 11431da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 11441da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 11451da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 11461da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 11471da177e4SLinus Torvalds * because the pipeline is full. 11481da177e4SLinus Torvalds * 11491da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 11501da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 11511da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 11521da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 11531da177e4SLinus Torvalds * of having a fixed window size at almost all times. 11541da177e4SLinus Torvalds * 11551da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 11561da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 11571da177e4SLinus Torvalds * 11581da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 11591da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 
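 *
 * For example, with mss = 1460, no window scaling and 10000 bytes of
 * free receive space, the code below offers (10000/1460)*1460 = 8760
 * bytes rather than 10000, so the advertised edge moves in whole-MSS
 * steps.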
11601da177e4SLinus Torvalds */
11611da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk)
11621da177e4SLinus Torvalds {
1163463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
11641da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
11651da177e4SLinus Torvalds /* MSS for the peer's data. Previous versions used mss_clamp
11661da177e4SLinus Torvalds * here. I don't know if the value based on our guesses
11671da177e4SLinus Torvalds * of the peer's MSS is better for the performance. It's more correct
11681da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss
11691da177e4SLinus Torvalds * fluctuations. --SAW 1998/11/1
11701da177e4SLinus Torvalds */
1171463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss;
11721da177e4SLinus Torvalds int free_space = tcp_space(sk);
11731da177e4SLinus Torvalds int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
11741da177e4SLinus Torvalds int window;
11751da177e4SLinus Torvalds 
11761da177e4SLinus Torvalds if (mss > full_space)
11771da177e4SLinus Torvalds mss = full_space;
11781da177e4SLinus Torvalds 
11791da177e4SLinus Torvalds if (free_space < full_space/2) {
1180463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0;
11811da177e4SLinus Torvalds 
11821da177e4SLinus Torvalds if (tcp_memory_pressure)
11831da177e4SLinus Torvalds tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds if (free_space < mss)
11861da177e4SLinus Torvalds return 0;
11871da177e4SLinus Torvalds }
11881da177e4SLinus Torvalds 
11891da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh)
11901da177e4SLinus Torvalds free_space = tp->rcv_ssthresh;
11911da177e4SLinus Torvalds 
11921da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the
11931da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway.
11941da177e4SLinus Torvalds */
11951da177e4SLinus Torvalds window = tp->rcv_wnd;
11961da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) {
11971da177e4SLinus Torvalds window = free_space;
11981da177e4SLinus Torvalds 
11991da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away.
12001da177e4SLinus Torvalds * Important case: prevent zero window announcement if
12011da177e4SLinus Torvalds * 1<<rcv_wscale > mss.
12021da177e4SLinus Torvalds */
12031da177e4SLinus Torvalds if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
12041da177e4SLinus Torvalds window = (((window >> tp->rx_opt.rcv_wscale) + 1)
12051da177e4SLinus Torvalds << tp->rx_opt.rcv_wscale);
12061da177e4SLinus Torvalds } else {
12071da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss.
12081da177e4SLinus Torvalds * Window clamp already applied above.
12091da177e4SLinus Torvalds * If our current window offering is within 1 mss of the
12101da177e4SLinus Torvalds * free space we just keep it. This prevents the divide
12111da177e4SLinus Torvalds * and multiply from happening most of the time.
12121da177e4SLinus Torvalds * We also don't do any window rounding when the free space
12131da177e4SLinus Torvalds * is too small.
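 *
 * Example: with mss = 1000 and a current offer of 4000, the offer is
 * left alone while free_space stays in 4000..4999; once free_space
 * reaches 5000, the test below recomputes it as (5000/1000)*1000 =
 * 5000.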
12141da177e4SLinus Torvalds */ 12151da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 12161da177e4SLinus Torvalds window = (free_space/mss)*mss; 12171da177e4SLinus Torvalds } 12181da177e4SLinus Torvalds 12191da177e4SLinus Torvalds return window; 12201da177e4SLinus Torvalds } 12211da177e4SLinus Torvalds 12221da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */ 12231da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) 12241da177e4SLinus Torvalds { 12251da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 12261da177e4SLinus Torvalds struct sk_buff *next_skb = skb->next; 12271da177e4SLinus Torvalds 12281da177e4SLinus Torvalds /* The first test we must make is that neither of these two 12291da177e4SLinus Torvalds * SKB's are still referenced by someone else. 12301da177e4SLinus Torvalds */ 12311da177e4SLinus Torvalds if (!skb_cloned(skb) && !skb_cloned(next_skb)) { 12321da177e4SLinus Torvalds int skb_size = skb->len, next_skb_size = next_skb->len; 12331da177e4SLinus Torvalds u16 flags = TCP_SKB_CB(skb)->flags; 12341da177e4SLinus Torvalds 12351da177e4SLinus Torvalds /* Also punt if next skb has been SACK'd. */ 12361da177e4SLinus Torvalds if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) 12371da177e4SLinus Torvalds return; 12381da177e4SLinus Torvalds 12391da177e4SLinus Torvalds /* Next skb is out of window. */ 12401da177e4SLinus Torvalds if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd)) 12411da177e4SLinus Torvalds return; 12421da177e4SLinus Torvalds 12431da177e4SLinus Torvalds /* Punt if not enough space exists in the first SKB for 12441da177e4SLinus Torvalds * the data in the second, or the total combined payload 12451da177e4SLinus Torvalds * would exceed the MSS. 12461da177e4SLinus Torvalds */ 12471da177e4SLinus Torvalds if ((next_skb_size > skb_tailroom(skb)) || 12481da177e4SLinus Torvalds ((skb_size + next_skb_size) > mss_now)) 12491da177e4SLinus Torvalds return; 12501da177e4SLinus Torvalds 12511da177e4SLinus Torvalds BUG_ON(tcp_skb_pcount(skb) != 1 || 12521da177e4SLinus Torvalds tcp_skb_pcount(next_skb) != 1); 12531da177e4SLinus Torvalds 12541da177e4SLinus Torvalds /* Ok. We will be able to collapse the packet. */ 12558728b834SDavid S. Miller __skb_unlink(next_skb, &sk->sk_write_queue); 12561da177e4SLinus Torvalds 12571da177e4SLinus Torvalds memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); 12581da177e4SLinus Torvalds 12591da177e4SLinus Torvalds if (next_skb->ip_summed == CHECKSUM_HW) 12601da177e4SLinus Torvalds skb->ip_summed = CHECKSUM_HW; 12611da177e4SLinus Torvalds 12621da177e4SLinus Torvalds if (skb->ip_summed != CHECKSUM_HW) 12631da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 12641da177e4SLinus Torvalds 12651da177e4SLinus Torvalds /* Update sequence range on original skb. */ 12661da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 12671da177e4SLinus Torvalds 12681da177e4SLinus Torvalds /* Merge over control information. */ 12691da177e4SLinus Torvalds flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */ 12701da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = flags; 12711da177e4SLinus Torvalds 12721da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 12731da177e4SLinus Torvalds * packet counting does not break. 
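 *
 * e.g. if next_skb had been marked lost, lost_out and left_out each
 * drop by one below (its pcount is always 1 here, since multi-segment
 * skbs are never collapsed), keeping the left_out accounting
 * consistent with sacked_out + lost_out.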
12741da177e4SLinus Torvalds */ 12751da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL); 12761da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS) 12771da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(next_skb); 12781da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) { 12791da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(next_skb); 12801da177e4SLinus Torvalds tp->left_out -= tcp_skb_pcount(next_skb); 12811da177e4SLinus Torvalds } 12821da177e4SLinus Torvalds /* Reno case is special. Sigh... */ 12831da177e4SLinus Torvalds if (!tp->rx_opt.sack_ok && tp->sacked_out) { 12841da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->sacked_out, next_skb); 12851da177e4SLinus Torvalds tp->left_out -= tcp_skb_pcount(next_skb); 12861da177e4SLinus Torvalds } 12871da177e4SLinus Torvalds 12881da177e4SLinus Torvalds /* Not quite right: it can be > snd.fack, but 12891da177e4SLinus Torvalds * it is better to underestimate fackets. 12901da177e4SLinus Torvalds */ 12911da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->fackets_out, next_skb); 12921da177e4SLinus Torvalds tcp_packets_out_dec(tp, next_skb); 12931da177e4SLinus Torvalds sk_stream_free_skb(sk, next_skb); 12941da177e4SLinus Torvalds } 12951da177e4SLinus Torvalds } 12961da177e4SLinus Torvalds 12971da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in 12981da177e4SLinus Torvalds * tcp_timer. This is used for path mtu discovery. 12991da177e4SLinus Torvalds * The socket is already locked here. 13001da177e4SLinus Torvalds */ 13011da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk) 13021da177e4SLinus Torvalds { 13036687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 13041da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13051da177e4SLinus Torvalds struct sk_buff *skb; 13061da177e4SLinus Torvalds unsigned int mss = tcp_current_mss(sk, 0); 13071da177e4SLinus Torvalds int lost = 0; 13081da177e4SLinus Torvalds 13091da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 13101da177e4SLinus Torvalds if (skb->len > mss && 13111da177e4SLinus Torvalds !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { 13121da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 13131da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 13141da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 13151da177e4SLinus Torvalds } 13161da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) { 13171da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 13181da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 13191da177e4SLinus Torvalds lost = 1; 13201da177e4SLinus Torvalds } 13211da177e4SLinus Torvalds } 13221da177e4SLinus Torvalds } 13231da177e4SLinus Torvalds 13241da177e4SLinus Torvalds if (!lost) 13251da177e4SLinus Torvalds return; 13261da177e4SLinus Torvalds 13271da177e4SLinus Torvalds tcp_sync_left_out(tp); 13281da177e4SLinus Torvalds 13291da177e4SLinus Torvalds /* Don't muck with the congestion window here. 13301da177e4SLinus Torvalds * Reason is that we do not increase amount of _data_ 13311da177e4SLinus Torvalds * in network, but units changed and effective 13321da177e4SLinus Torvalds * cwnd/ssthresh really reduced now. 
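 *
 * For example, if path MTU discovery drops the mss from 1460 to 536,
 * the same number of bytes in flight suddenly counts as roughly 2.7
 * times as many segments, even though no additional data has entered
 * the network.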
13331da177e4SLinus Torvalds */
13346687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss) {
13351da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt;
13366687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = tcp_current_ssthresh(sk);
13371da177e4SLinus Torvalds tp->prior_ssthresh = 0;
13381da177e4SLinus Torvalds tp->undo_marker = 0;
13396687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss);
13401da177e4SLinus Torvalds }
13411da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk);
13421da177e4SLinus Torvalds }
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue
13451da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an
13461da177e4SLinus Torvalds * error occurred which prevented the send.
13471da177e4SLinus Torvalds */
13481da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
13491da177e4SLinus Torvalds {
13501da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
13511da177e4SLinus Torvalds unsigned int cur_mss = tcp_current_mss(sk, 0);
13521da177e4SLinus Torvalds int err;
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible
13551da177e4SLinus Torvalds * copying overhead: fragmentation, tunneling, mangling etc.
13561da177e4SLinus Torvalds */
13571da177e4SLinus Torvalds if (atomic_read(&sk->sk_wmem_alloc) >
13581da177e4SLinus Torvalds min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
13591da177e4SLinus Torvalds return -EAGAIN;
13601da177e4SLinus Torvalds 
13611da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
13621da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
13631da177e4SLinus Torvalds BUG();
13641da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
13651da177e4SLinus Torvalds return -ENOMEM;
13661da177e4SLinus Torvalds }
13671da177e4SLinus Torvalds 
13681da177e4SLinus Torvalds /* If the receiver has shrunk its window, and skb is out of
13691da177e4SLinus Torvalds * the new window, do not retransmit it. The exception is the
13701da177e4SLinus Torvalds * case when the window is shrunk to zero. In this case
13711da177e4SLinus Torvalds * our retransmit serves as a zero window probe.
13721da177e4SLinus Torvalds */
13731da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
13741da177e4SLinus Torvalds && TCP_SKB_CB(skb)->seq != tp->snd_una)
13751da177e4SLinus Torvalds return -EAGAIN;
13761da177e4SLinus Torvalds 
13771da177e4SLinus Torvalds if (skb->len > cur_mss) {
1378846998aeSDavid S. Miller if (tcp_fragment(sk, skb, cur_mss, cur_mss))
13791da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */
13801da177e4SLinus Torvalds }
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds /* Collapse two adjacent packets if worthwhile and we can.
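 * "Worthwhile" means the skb is smaller than half of cur_mss and both
 * it and its successor are linear, single-segment skbs; see the checks
 * below, all of which are gated by sysctl_tcp_retrans_collapse.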
*/ 13831da177e4SLinus Torvalds if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 13841da177e4SLinus Torvalds (skb->len < (cur_mss >> 1)) && 13851da177e4SLinus Torvalds (skb->next != sk->sk_send_head) && 13861da177e4SLinus Torvalds (skb->next != (struct sk_buff *)&sk->sk_write_queue) && 13871da177e4SLinus Torvalds (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) && 13881da177e4SLinus Torvalds (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) && 13891da177e4SLinus Torvalds (sysctl_tcp_retrans_collapse != 0)) 13901da177e4SLinus Torvalds tcp_retrans_try_collapse(sk, skb, cur_mss); 13911da177e4SLinus Torvalds 13921da177e4SLinus Torvalds if(tp->af_specific->rebuild_header(sk)) 13931da177e4SLinus Torvalds return -EHOSTUNREACH; /* Routing failure or similar. */ 13941da177e4SLinus Torvalds 13951da177e4SLinus Torvalds /* Some Solaris stacks overoptimize and ignore the FIN on a 13961da177e4SLinus Torvalds * retransmit when old data is attached. So strip it off 13971da177e4SLinus Torvalds * since it is cheap to do so and saves bytes on the network. 13981da177e4SLinus Torvalds */ 13991da177e4SLinus Torvalds if(skb->len > 0 && 14001da177e4SLinus Torvalds (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 14011da177e4SLinus Torvalds tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 14021da177e4SLinus Torvalds if (!pskb_trim(skb, 0)) { 14031da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 14041da177e4SLinus Torvalds skb_shinfo(skb)->tso_segs = 1; 14051da177e4SLinus Torvalds skb_shinfo(skb)->tso_size = 0; 14061da177e4SLinus Torvalds skb->ip_summed = CHECKSUM_NONE; 14071da177e4SLinus Torvalds skb->csum = 0; 14081da177e4SLinus Torvalds } 14091da177e4SLinus Torvalds } 14101da177e4SLinus Torvalds 14111da177e4SLinus Torvalds /* Make a copy, if the first transmission SKB clone we made 14121da177e4SLinus Torvalds * is still in somebody's hands, else make a clone. 14131da177e4SLinus Torvalds */ 14141da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 14151da177e4SLinus Torvalds 14161da177e4SLinus Torvalds err = tcp_transmit_skb(sk, (skb_cloned(skb) ? 14171da177e4SLinus Torvalds pskb_copy(skb, GFP_ATOMIC): 14181da177e4SLinus Torvalds skb_clone(skb, GFP_ATOMIC))); 14191da177e4SLinus Torvalds 14201da177e4SLinus Torvalds if (err == 0) { 14211da177e4SLinus Torvalds /* Update global TCP statistics. */ 14221da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_RETRANSSEGS); 14231da177e4SLinus Torvalds 14241da177e4SLinus Torvalds tp->total_retrans++; 14251da177e4SLinus Torvalds 14261da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 14271da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 14281da177e4SLinus Torvalds if (net_ratelimit()) 14291da177e4SLinus Torvalds printk(KERN_DEBUG "retrans_out leaked.\n"); 14301da177e4SLinus Torvalds } 14311da177e4SLinus Torvalds #endif 14321da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 14331da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 14341da177e4SLinus Torvalds 14351da177e4SLinus Torvalds /* Save stamp of the first retransmit. */ 14361da177e4SLinus Torvalds if (!tp->retrans_stamp) 14371da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(skb)->when; 14381da177e4SLinus Torvalds 14391da177e4SLinus Torvalds tp->undo_retrans++; 14401da177e4SLinus Torvalds 14411da177e4SLinus Torvalds /* snd_nxt is stored to detect loss of retransmitted segment, 14421da177e4SLinus Torvalds * see tcp_input.c tcp_sacktag_write_queue(). 
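 *
 * (If a later SACK acknowledges data beyond this recorded snd_nxt
 * while the retransmitted range itself is still unacknowledged, the
 * retransmission is considered lost.)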
14431da177e4SLinus Torvalds */
14441da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
14451da177e4SLinus Torvalds }
14461da177e4SLinus Torvalds return err;
14471da177e4SLinus Torvalds }
14481da177e4SLinus Torvalds 
14491da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
14501da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue
14511da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either
14521da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached.
14531da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout
14541da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again.
14551da177e4SLinus Torvalds * If so, we use it to avoid unnecessary retransmissions.
14561da177e4SLinus Torvalds */
14571da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
14581da177e4SLinus Torvalds {
14596687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
14601da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
14611da177e4SLinus Torvalds struct sk_buff *skb;
14621da177e4SLinus Torvalds int packet_cnt = tp->lost_out;
14631da177e4SLinus Torvalds 
14641da177e4SLinus Torvalds /* First pass: retransmit lost packets. */
14651da177e4SLinus Torvalds if (packet_cnt) {
14661da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) {
14671da177e4SLinus Torvalds __u8 sacked = TCP_SKB_CB(skb)->sacked;
14681da177e4SLinus Torvalds 
14691da177e4SLinus Torvalds /* Assume this retransmit will generate
14701da177e4SLinus Torvalds * only one packet for congestion window
14711da177e4SLinus Torvalds * calculation purposes. This works because
14721da177e4SLinus Torvalds * tcp_retransmit_skb() will chop up the
14731da177e4SLinus Torvalds * packet to be MSS sized and all the
14741da177e4SLinus Torvalds * packet counting works out.
14751da177e4SLinus Torvalds */
14761da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
14771da177e4SLinus Torvalds return;
14781da177e4SLinus Torvalds 
14791da177e4SLinus Torvalds if (sacked&TCPCB_LOST) {
14801da177e4SLinus Torvalds if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
14811da177e4SLinus Torvalds if (tcp_retransmit_skb(sk, skb))
14821da177e4SLinus Torvalds return;
14836687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss)
14841da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
14851da177e4SLinus Torvalds else
14861da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);
14871da177e4SLinus Torvalds 
14881da177e4SLinus Torvalds if (skb ==
14891da177e4SLinus Torvalds skb_peek(&sk->sk_write_queue))
1490463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
14913f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto,
14923f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX);
14931da177e4SLinus Torvalds }
14941da177e4SLinus Torvalds 
14951da177e4SLinus Torvalds packet_cnt -= tcp_skb_pcount(skb);
14961da177e4SLinus Torvalds if (packet_cnt <= 0)
14971da177e4SLinus Torvalds break;
14981da177e4SLinus Torvalds }
14991da177e4SLinus Torvalds }
15001da177e4SLinus Torvalds }
15011da177e4SLinus Torvalds 
15021da177e4SLinus Torvalds /* OK, demanded retransmission is finished. */
15031da177e4SLinus Torvalds 
15041da177e4SLinus Torvalds /* Forward retransmissions are possible only during Recovery.
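 * The second pass below only runs with SACK enabled, skips any skb
 * that already carries a TCPCB_TAGBITS mark, and walks at most
 * fackets_out packets.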
*/ 15056687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Recovery) 15061da177e4SLinus Torvalds return; 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds /* No forward retransmissions in Reno are possible. */ 15091da177e4SLinus Torvalds if (!tp->rx_opt.sack_ok) 15101da177e4SLinus Torvalds return; 15111da177e4SLinus Torvalds 15121da177e4SLinus Torvalds /* Yeah, we have to make difficult choice between forward transmission 15131da177e4SLinus Torvalds * and retransmission... Both ways have their merits... 15141da177e4SLinus Torvalds * 15151da177e4SLinus Torvalds * For now we do not retransmit anything, while we have some new 15161da177e4SLinus Torvalds * segments to send. 15171da177e4SLinus Torvalds */ 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds if (tcp_may_send_now(sk, tp)) 15201da177e4SLinus Torvalds return; 15211da177e4SLinus Torvalds 15221da177e4SLinus Torvalds packet_cnt = 0; 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 15251da177e4SLinus Torvalds /* Similar to the retransmit loop above we 15261da177e4SLinus Torvalds * can pretend that the retransmitted SKB 15271da177e4SLinus Torvalds * we send out here will be composed of one 15281da177e4SLinus Torvalds * real MSS sized packet because tcp_retransmit_skb() 15291da177e4SLinus Torvalds * will fragment it if necessary. 15301da177e4SLinus Torvalds */ 15311da177e4SLinus Torvalds if (++packet_cnt > tp->fackets_out) 15321da177e4SLinus Torvalds break; 15331da177e4SLinus Torvalds 15341da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 15351da177e4SLinus Torvalds break; 15361da177e4SLinus Torvalds 15371da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 15381da177e4SLinus Torvalds continue; 15391da177e4SLinus Torvalds 15401da177e4SLinus Torvalds /* Ok, retransmit it. */ 15411da177e4SLinus Torvalds if (tcp_retransmit_skb(sk, skb)) 15421da177e4SLinus Torvalds break; 15431da177e4SLinus Torvalds 15441da177e4SLinus Torvalds if (skb == skb_peek(&sk->sk_write_queue)) 15453f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 15463f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 15473f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 15481da177e4SLinus Torvalds 15491da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 15501da177e4SLinus Torvalds } 15511da177e4SLinus Torvalds } 15521da177e4SLinus Torvalds 15531da177e4SLinus Torvalds 15541da177e4SLinus Torvalds /* Send a fin. The caller locks the socket for us. This cannot be 15551da177e4SLinus Torvalds * allowed to fail queueing a FIN frame under any circumstances. 15561da177e4SLinus Torvalds */ 15571da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 15581da177e4SLinus Torvalds { 15591da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15601da177e4SLinus Torvalds struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); 15611da177e4SLinus Torvalds int mss_now; 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds /* Optimization, tack on the FIN if we have a queue of 15641da177e4SLinus Torvalds * unsent frames. But be careful about outgoing SACKS 15651da177e4SLinus Torvalds * and IP options. 
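 *
 * Piggy-backing the FIN on the tail skb costs one sequence number but
 * no extra packet; the standalone path below is only needed when
 * nothing is waiting to be sent, and it loops on allocation because a
 * FIN may never be dropped here.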
15661da177e4SLinus Torvalds */ 15671da177e4SLinus Torvalds mss_now = tcp_current_mss(sk, 1); 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds if (sk->sk_send_head != NULL) { 15701da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 15711da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq++; 15721da177e4SLinus Torvalds tp->write_seq++; 15731da177e4SLinus Torvalds } else { 15741da177e4SLinus Torvalds /* Socket is locked, keep trying until memory is available. */ 15751da177e4SLinus Torvalds for (;;) { 1576d179cd12SDavid S. Miller skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 15771da177e4SLinus Torvalds if (skb) 15781da177e4SLinus Torvalds break; 15791da177e4SLinus Torvalds yield(); 15801da177e4SLinus Torvalds } 15811da177e4SLinus Torvalds 15821da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 15831da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 15841da177e4SLinus Torvalds skb->csum = 0; 15851da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 15861da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 15871da177e4SLinus Torvalds skb_shinfo(skb)->tso_segs = 1; 15881da177e4SLinus Torvalds skb_shinfo(skb)->tso_size = 0; 15891da177e4SLinus Torvalds 15901da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 15911da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = tp->write_seq; 15921da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 15931da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 15941da177e4SLinus Torvalds } 15951da177e4SLinus Torvalds __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); 15961da177e4SLinus Torvalds } 15971da177e4SLinus Torvalds 15981da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 15991da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 16001da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 16011da177e4SLinus Torvalds * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM 16021da177e4SLinus Torvalds */ 160386a76cafSVictor Fusco void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) 16041da177e4SLinus Torvalds { 16051da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 16061da177e4SLinus Torvalds struct sk_buff *skb; 16071da177e4SLinus Torvalds 16081da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 16091da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 16101da177e4SLinus Torvalds if (!skb) { 16111da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 16121da177e4SLinus Torvalds return; 16131da177e4SLinus Torvalds } 16141da177e4SLinus Torvalds 16151da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 16161da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 16171da177e4SLinus Torvalds skb->csum = 0; 16181da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 16191da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 16201da177e4SLinus Torvalds skb_shinfo(skb)->tso_segs = 1; 16211da177e4SLinus Torvalds skb_shinfo(skb)->tso_size = 0; 16221da177e4SLinus Torvalds 16231da177e4SLinus Torvalds /* Send it off. 
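 * The RST consumes no sequence space (seq == end_seq) and is never
 * queued for retransmission; if allocation or transmission fails, only
 * the TCPABORTFAILED counter is bumped.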
*/ 16241da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); 16251da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 16261da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 16271da177e4SLinus Torvalds if (tcp_transmit_skb(sk, skb)) 16281da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 16291da177e4SLinus Torvalds } 16301da177e4SLinus Torvalds 16311da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent 16321da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 16331da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 16341da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 16351da177e4SLinus Torvalds */ 16361da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 16371da177e4SLinus Torvalds { 16381da177e4SLinus Torvalds struct sk_buff* skb; 16391da177e4SLinus Torvalds 16401da177e4SLinus Torvalds skb = skb_peek(&sk->sk_write_queue); 16411da177e4SLinus Torvalds if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { 16421da177e4SLinus Torvalds printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 16431da177e4SLinus Torvalds return -EFAULT; 16441da177e4SLinus Torvalds } 16451da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { 16461da177e4SLinus Torvalds if (skb_cloned(skb)) { 16471da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 16481da177e4SLinus Torvalds if (nskb == NULL) 16491da177e4SLinus Torvalds return -ENOMEM; 16501da177e4SLinus Torvalds __skb_unlink(skb, &sk->sk_write_queue); 16511da177e4SLinus Torvalds skb_header_release(nskb); 16521da177e4SLinus Torvalds __skb_queue_head(&sk->sk_write_queue, nskb); 16531da177e4SLinus Torvalds sk_stream_free_skb(sk, skb); 16541da177e4SLinus Torvalds sk_charge_skb(sk, nskb); 16551da177e4SLinus Torvalds skb = nskb; 16561da177e4SLinus Torvalds } 16571da177e4SLinus Torvalds 16581da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 16591da177e4SLinus Torvalds TCP_ECN_send_synack(tcp_sk(sk), skb); 16601da177e4SLinus Torvalds } 16611da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 16621da177e4SLinus Torvalds return tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)); 16631da177e4SLinus Torvalds } 16641da177e4SLinus Torvalds 16651da177e4SLinus Torvalds /* 16661da177e4SLinus Torvalds * Prepare a SYN-ACK. 16671da177e4SLinus Torvalds */ 16681da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 166960236fddSArnaldo Carvalho de Melo struct request_sock *req) 16701da177e4SLinus Torvalds { 16712e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 16721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 16731da177e4SLinus Torvalds struct tcphdr *th; 16741da177e4SLinus Torvalds int tcp_header_size; 16751da177e4SLinus Torvalds struct sk_buff *skb; 16761da177e4SLinus Torvalds 16771da177e4SLinus Torvalds skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 16781da177e4SLinus Torvalds if (skb == NULL) 16791da177e4SLinus Torvalds return NULL; 16801da177e4SLinus Torvalds 16811da177e4SLinus Torvalds /* Reserve space for headers. 
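 * With timestamps and window scaling negotiated, the header built
 * below is 20 + 4 (MSS) + 12 (aligned timestamps, which also cover
 * SACK_PERM) + 4 (aligned wscale) = 40 bytes.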
*/ 16821da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 16831da177e4SLinus Torvalds 16841da177e4SLinus Torvalds skb->dst = dst_clone(dst); 16851da177e4SLinus Torvalds 16861da177e4SLinus Torvalds tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + 16872e6599cbSArnaldo Carvalho de Melo (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + 16882e6599cbSArnaldo Carvalho de Melo (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + 16891da177e4SLinus Torvalds /* SACK_PERM is in the place of NOP NOP of TS */ 16902e6599cbSArnaldo Carvalho de Melo ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); 16911da177e4SLinus Torvalds skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 16941da177e4SLinus Torvalds th->syn = 1; 16951da177e4SLinus Torvalds th->ack = 1; 16961da177e4SLinus Torvalds if (dst->dev->features&NETIF_F_TSO) 16972e6599cbSArnaldo Carvalho de Melo ireq->ecn_ok = 0; 16981da177e4SLinus Torvalds TCP_ECN_make_synack(req, th); 16991da177e4SLinus Torvalds th->source = inet_sk(sk)->sport; 17002e6599cbSArnaldo Carvalho de Melo th->dest = ireq->rmt_port; 17012e6599cbSArnaldo Carvalho de Melo TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 17021da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 17031da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 17041da177e4SLinus Torvalds skb_shinfo(skb)->tso_segs = 1; 17051da177e4SLinus Torvalds skb_shinfo(skb)->tso_size = 0; 17061da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq); 17072e6599cbSArnaldo Carvalho de Melo th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 17081da177e4SLinus Torvalds if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 17091da177e4SLinus Torvalds __u8 rcv_wscale; 17101da177e4SLinus Torvalds /* Set this up on the first call only */ 17111da177e4SLinus Torvalds req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 17121da177e4SLinus Torvalds /* tcp_full_space because it is guaranteed to be the first packet */ 17131da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 17142e6599cbSArnaldo Carvalho de Melo dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 17151da177e4SLinus Torvalds &req->rcv_wnd, 17161da177e4SLinus Torvalds &req->window_clamp, 17172e6599cbSArnaldo Carvalho de Melo ireq->wscale_ok, 17181da177e4SLinus Torvalds &rcv_wscale); 17192e6599cbSArnaldo Carvalho de Melo ireq->rcv_wscale = rcv_wscale; 17201da177e4SLinus Torvalds } 17211da177e4SLinus Torvalds 17221da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
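 * The raw 16-bit rcv_wnd value therefore goes on the wire here; the
 * negotiated rcv_wscale only applies to segments sent after the
 * handshake.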
*/ 17231da177e4SLinus Torvalds th->window = htons(req->rcv_wnd); 17241da177e4SLinus Torvalds 17251da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 17262e6599cbSArnaldo Carvalho de Melo tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 17272e6599cbSArnaldo Carvalho de Melo ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 17281da177e4SLinus Torvalds TCP_SKB_CB(skb)->when, 17291da177e4SLinus Torvalds req->ts_recent); 17301da177e4SLinus Torvalds 17311da177e4SLinus Torvalds skb->csum = 0; 17321da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 17331da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_OUTSEGS); 17341da177e4SLinus Torvalds return skb; 17351da177e4SLinus Torvalds } 17361da177e4SLinus Torvalds 17371da177e4SLinus Torvalds /* 17381da177e4SLinus Torvalds * Do all connect socket setups that can be done AF independent. 17391da177e4SLinus Torvalds */ 17401da177e4SLinus Torvalds static inline void tcp_connect_init(struct sock *sk) 17411da177e4SLinus Torvalds { 17421da177e4SLinus Torvalds struct dst_entry *dst = __sk_dst_get(sk); 17431da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17441da177e4SLinus Torvalds __u8 rcv_wscale; 17451da177e4SLinus Torvalds 17461da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end. 17471da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 17481da177e4SLinus Torvalds */ 17491da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr) + 17501da177e4SLinus Torvalds (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 17511da177e4SLinus Torvalds 17521da177e4SLinus Torvalds /* If user gave his TCP_MAXSEG, record it to clamp */ 17531da177e4SLinus Torvalds if (tp->rx_opt.user_mss) 17541da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 17551da177e4SLinus Torvalds tp->max_window = 0; 17561da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst)); 17571da177e4SLinus Torvalds 17581da177e4SLinus Torvalds if (!tp->window_clamp) 17591da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 17601da177e4SLinus Torvalds tp->advmss = dst_metric(dst, RTAX_ADVMSS); 17611da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 17621da177e4SLinus Torvalds 17631da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 17641da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), 17651da177e4SLinus Torvalds &tp->rcv_wnd, 17661da177e4SLinus Torvalds &tp->window_clamp, 17671da177e4SLinus Torvalds sysctl_tcp_window_scaling, 17681da177e4SLinus Torvalds &rcv_wscale); 17691da177e4SLinus Torvalds 17701da177e4SLinus Torvalds tp->rx_opt.rcv_wscale = rcv_wscale; 17711da177e4SLinus Torvalds tp->rcv_ssthresh = tp->rcv_wnd; 17721da177e4SLinus Torvalds 17731da177e4SLinus Torvalds sk->sk_err = 0; 17741da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 17751da177e4SLinus Torvalds tp->snd_wnd = 0; 17761da177e4SLinus Torvalds tcp_init_wl(tp, tp->write_seq, 0); 17771da177e4SLinus Torvalds tp->snd_una = tp->write_seq; 17781da177e4SLinus Torvalds tp->snd_sml = tp->write_seq; 17791da177e4SLinus Torvalds tp->rcv_nxt = 0; 17801da177e4SLinus Torvalds tp->rcv_wup = 0; 17811da177e4SLinus Torvalds tp->copied_seq = 0; 17821da177e4SLinus Torvalds 1783463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 1784463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 17851da177e4SLinus Torvalds tcp_clear_retrans(tp); 17861da177e4SLinus Torvalds } 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds /* 17891da177e4SLinus Torvalds * Build a SYN and send it off. 17901da177e4SLinus Torvalds */ 17911da177e4SLinus Torvalds int tcp_connect(struct sock *sk) 17921da177e4SLinus Torvalds { 17931da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17941da177e4SLinus Torvalds struct sk_buff *buff; 17951da177e4SLinus Torvalds 17961da177e4SLinus Torvalds tcp_connect_init(sk); 17971da177e4SLinus Torvalds 1798d179cd12SDavid S. Miller buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 17991da177e4SLinus Torvalds if (unlikely(buff == NULL)) 18001da177e4SLinus Torvalds return -ENOBUFS; 18011da177e4SLinus Torvalds 18021da177e4SLinus Torvalds /* Reserve space for headers. */ 18031da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 18041da177e4SLinus Torvalds 18051da177e4SLinus Torvalds TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; 18061da177e4SLinus Torvalds TCP_ECN_send_syn(sk, tp, buff); 18071da177e4SLinus Torvalds TCP_SKB_CB(buff)->sacked = 0; 18081da177e4SLinus Torvalds skb_shinfo(buff)->tso_segs = 1; 18091da177e4SLinus Torvalds skb_shinfo(buff)->tso_size = 0; 18101da177e4SLinus Torvalds buff->csum = 0; 18111da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = tp->write_seq++; 18121da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = tp->write_seq; 18131da177e4SLinus Torvalds tp->snd_nxt = tp->write_seq; 18141da177e4SLinus Torvalds tp->pushed_seq = tp->write_seq; 18151da177e4SLinus Torvalds 18161da177e4SLinus Torvalds /* Send it off. */ 18171da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp; 18181da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(buff)->when; 18191da177e4SLinus Torvalds skb_header_release(buff); 18201da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_write_queue, buff); 18211da177e4SLinus Torvalds sk_charge_skb(sk, buff); 18221da177e4SLinus Torvalds tp->packets_out += tcp_skb_pcount(buff); 18231da177e4SLinus Torvalds tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL)); 18241da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 18251da177e4SLinus Torvalds 18261da177e4SLinus Torvalds /* Timer for repeating the SYN until an answer. 
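 * The timer is armed with the initial icsk_rto set up in
 * tcp_connect_init() and capped at TCP_RTO_MAX; the retransmit timer
 * code backs it off exponentially for every unanswered SYN.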
*/ 18273f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 18283f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 18291da177e4SLinus Torvalds return 0; 18301da177e4SLinus Torvalds } 18311da177e4SLinus Torvalds 18321da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking 18331da177e4SLinus Torvalds * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 18341da177e4SLinus Torvalds * for details. 18351da177e4SLinus Torvalds */ 18361da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk) 18371da177e4SLinus Torvalds { 1838463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 1839463c84b9SArnaldo Carvalho de Melo int ato = icsk->icsk_ack.ato; 18401da177e4SLinus Torvalds unsigned long timeout; 18411da177e4SLinus Torvalds 18421da177e4SLinus Torvalds if (ato > TCP_DELACK_MIN) { 1843463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 18441da177e4SLinus Torvalds int max_ato = HZ/2; 18451da177e4SLinus Torvalds 1846463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 18471da177e4SLinus Torvalds max_ato = TCP_DELACK_MAX; 18481da177e4SLinus Torvalds 18491da177e4SLinus Torvalds /* Slow path, intersegment interval is "high". */ 18501da177e4SLinus Torvalds 18511da177e4SLinus Torvalds /* If some rtt estimate is known, use it to bound delayed ack. 1852463c84b9SArnaldo Carvalho de Melo * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 18531da177e4SLinus Torvalds * directly. 18541da177e4SLinus Torvalds */ 18551da177e4SLinus Torvalds if (tp->srtt) { 18561da177e4SLinus Torvalds int rtt = max(tp->srtt>>3, TCP_DELACK_MIN); 18571da177e4SLinus Torvalds 18581da177e4SLinus Torvalds if (rtt < max_ato) 18591da177e4SLinus Torvalds max_ato = rtt; 18601da177e4SLinus Torvalds } 18611da177e4SLinus Torvalds 18621da177e4SLinus Torvalds ato = min(ato, max_ato); 18631da177e4SLinus Torvalds } 18641da177e4SLinus Torvalds 18651da177e4SLinus Torvalds /* Stay within the limit we were given */ 18661da177e4SLinus Torvalds timeout = jiffies + ato; 18671da177e4SLinus Torvalds 18681da177e4SLinus Torvalds /* Use new timeout only if there wasn't a older one earlier. */ 1869463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 18701da177e4SLinus Torvalds /* If delack timer was blocked or is about to expire, 18711da177e4SLinus Torvalds * send ACK now. 18721da177e4SLinus Torvalds */ 1873463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.blocked || 1874463c84b9SArnaldo Carvalho de Melo time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 18751da177e4SLinus Torvalds tcp_send_ack(sk); 18761da177e4SLinus Torvalds return; 18771da177e4SLinus Torvalds } 18781da177e4SLinus Torvalds 1879463c84b9SArnaldo Carvalho de Melo if (!time_before(timeout, icsk->icsk_ack.timeout)) 1880463c84b9SArnaldo Carvalho de Melo timeout = icsk->icsk_ack.timeout; 18811da177e4SLinus Torvalds } 1882463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 1883463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.timeout = timeout; 1884463c84b9SArnaldo Carvalho de Melo sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 18851da177e4SLinus Torvalds } 18861da177e4SLinus Torvalds 18871da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. 
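 * The window update itself comes from tcp_transmit_skb(), which runs
 * __tcp_select_window() (via tcp_select_window()) for every segment it
 * emits.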
*/ 18881da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk) 18891da177e4SLinus Torvalds { 18901da177e4SLinus Torvalds /* If we have been reset, we may not send again. */ 18911da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 18921da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 18931da177e4SLinus Torvalds struct sk_buff *buff; 18941da177e4SLinus Torvalds 18951da177e4SLinus Torvalds /* We are not putting this on the write queue, so 18961da177e4SLinus Torvalds * tcp_transmit_skb() will set the ownership to this 18971da177e4SLinus Torvalds * sock. 18981da177e4SLinus Torvalds */ 18991da177e4SLinus Torvalds buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 19001da177e4SLinus Torvalds if (buff == NULL) { 1901463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 1902463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 19033f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 19043f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX); 19051da177e4SLinus Torvalds return; 19061da177e4SLinus Torvalds } 19071da177e4SLinus Torvalds 19081da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 19091da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 19101da177e4SLinus Torvalds buff->csum = 0; 19111da177e4SLinus Torvalds TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; 19121da177e4SLinus Torvalds TCP_SKB_CB(buff)->sacked = 0; 19131da177e4SLinus Torvalds skb_shinfo(buff)->tso_segs = 1; 19141da177e4SLinus Torvalds skb_shinfo(buff)->tso_size = 0; 19151da177e4SLinus Torvalds 19161da177e4SLinus Torvalds /* Send it off, this clears delayed acks for us. */ 19171da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); 19181da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp; 19191da177e4SLinus Torvalds tcp_transmit_skb(sk, buff); 19201da177e4SLinus Torvalds } 19211da177e4SLinus Torvalds } 19221da177e4SLinus Torvalds 19231da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence 19241da177e4SLinus Torvalds * number. It assumes the other end will try to ack it. 19251da177e4SLinus Torvalds * 19261da177e4SLinus Torvalds * Question: what should we make while urgent mode? 19271da177e4SLinus Torvalds * 4.4BSD forces sending single byte of data. We cannot send 19281da177e4SLinus Torvalds * out of window data, because we have SND.NXT==SND.MAX... 19291da177e4SLinus Torvalds * 19301da177e4SLinus Torvalds * Current solution: to send TWO zero-length segments in urgent mode: 19311da177e4SLinus Torvalds * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 19321da177e4SLinus Torvalds * out-of-date with SND.UNA-1 to probe window. 19331da177e4SLinus Torvalds */ 19341da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 19351da177e4SLinus Torvalds { 19361da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19371da177e4SLinus Torvalds struct sk_buff *skb; 19381da177e4SLinus Torvalds 19391da177e4SLinus Torvalds /* We don't queue it, tcp_transmit_skb() sets ownership. */ 19401da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 19411da177e4SLinus Torvalds if (skb == NULL) 19421da177e4SLinus Torvalds return -1; 19431da177e4SLinus Torvalds 19441da177e4SLinus Torvalds /* Reserve space for headers and set control bits. 
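 * The probe is a bare ACK: it carries no data (seq == end_seq below)
 * and is sent directly, without being queued on the write queue.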
*/ 19451da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 19461da177e4SLinus Torvalds skb->csum = 0; 19471da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 19481da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = urgent; 19491da177e4SLinus Torvalds skb_shinfo(skb)->tso_segs = 1; 19501da177e4SLinus Torvalds skb_shinfo(skb)->tso_size = 0; 19511da177e4SLinus Torvalds 19521da177e4SLinus Torvalds /* Use a previous sequence. This should cause the other 19531da177e4SLinus Torvalds * end to send an ack. Don't queue or clone SKB, just 19541da177e4SLinus Torvalds * send it. 19551da177e4SLinus Torvalds */ 19561da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1; 19571da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 19581da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 19591da177e4SLinus Torvalds return tcp_transmit_skb(sk, skb); 19601da177e4SLinus Torvalds } 19611da177e4SLinus Torvalds 19621da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk) 19631da177e4SLinus Torvalds { 19641da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 19651da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19661da177e4SLinus Torvalds struct sk_buff *skb; 19671da177e4SLinus Torvalds 19681da177e4SLinus Torvalds if ((skb = sk->sk_send_head) != NULL && 19691da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { 19701da177e4SLinus Torvalds int err; 19711da177e4SLinus Torvalds unsigned int mss = tcp_current_mss(sk, 0); 19721da177e4SLinus Torvalds unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq; 19731da177e4SLinus Torvalds 19741da177e4SLinus Torvalds if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 19751da177e4SLinus Torvalds tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 19761da177e4SLinus Torvalds 19771da177e4SLinus Torvalds /* We are probing the opening of a window 19781da177e4SLinus Torvalds * but the window size is != 0 19791da177e4SLinus Torvalds * must have been a result SWS avoidance ( sender ) 19801da177e4SLinus Torvalds */ 19811da177e4SLinus Torvalds if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 19821da177e4SLinus Torvalds skb->len > mss) { 19831da177e4SLinus Torvalds seg_size = min(seg_size, mss); 19841da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 1985846998aeSDavid S. Miller if (tcp_fragment(sk, skb, seg_size, mss)) 19861da177e4SLinus Torvalds return -1; 19871da177e4SLinus Torvalds } else if (!tcp_skb_pcount(skb)) 1988846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss); 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 19911da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 19921da177e4SLinus Torvalds err = tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)); 19931da177e4SLinus Torvalds if (!err) { 19941da177e4SLinus Torvalds update_send_head(sk, tp, skb); 19951da177e4SLinus Torvalds } 19961da177e4SLinus Torvalds return err; 19971da177e4SLinus Torvalds } else { 19981da177e4SLinus Torvalds if (tp->urg_mode && 19991da177e4SLinus Torvalds between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF)) 20001da177e4SLinus Torvalds tcp_xmit_probe_skb(sk, TCPCB_URG); 20011da177e4SLinus Torvalds return tcp_xmit_probe_skb(sk, 0); 20021da177e4SLinus Torvalds } 20031da177e4SLinus Torvalds } 20041da177e4SLinus Torvalds return -1; 20051da177e4SLinus Torvalds } 20061da177e4SLinus Torvalds 20071da177e4SLinus Torvalds /* A window probe timeout has occurred. 
If window is not closed send 20081da177e4SLinus Torvalds * a partial packet else a zero probe. 20091da177e4SLinus Torvalds */ 20101da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk) 20111da177e4SLinus Torvalds { 2012463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 20131da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 20141da177e4SLinus Torvalds int err; 20151da177e4SLinus Torvalds 20161da177e4SLinus Torvalds err = tcp_write_wakeup(sk); 20171da177e4SLinus Torvalds 20181da177e4SLinus Torvalds if (tp->packets_out || !sk->sk_send_head) { 20191da177e4SLinus Torvalds /* Cancel probe timer, if it is not required. */ 20206687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 2021463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 20221da177e4SLinus Torvalds return; 20231da177e4SLinus Torvalds } 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds if (err <= 0) { 2026463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_backoff < sysctl_tcp_retries2) 2027463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff++; 20286687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out++; 2029463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 20303f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 20313f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 20321da177e4SLinus Torvalds } else { 20331da177e4SLinus Torvalds /* If packet was not sent due to local congestion, 20346687e988SArnaldo Carvalho de Melo * do not backoff and do not remember icsk_probes_out. 20351da177e4SLinus Torvalds * Let local senders to fight for local resources. 20361da177e4SLinus Torvalds * 20371da177e4SLinus Torvalds * Use accumulated backoff yet. 20381da177e4SLinus Torvalds */ 20396687e988SArnaldo Carvalho de Melo if (!icsk->icsk_probes_out) 20406687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 1; 2041463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2042463c84b9SArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, 20433f421baaSArnaldo Carvalho de Melo TCP_RESOURCE_PROBE_INTERVAL), 20443f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 20451da177e4SLinus Torvalds } 20461da177e4SLinus Torvalds } 20471da177e4SLinus Torvalds 20481da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect); 20491da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack); 20501da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit); 20511da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss); 2052
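/* Probe0 timing, for reference: each time a zero-window probe goes
 * unanswered, tcp_send_probe0() above doubles the interval to the next
 * one (icsk_rto << icsk_backoff, capped at TCP_RTO_MAX, with
 * icsk_backoff itself limited by sysctl_tcp_retries2). A probe that
 * could not be sent because of local congestion is retried after at
 * most TCP_RESOURCE_PROBE_INTERVAL without increasing the backoff.
 */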