/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

int sysctl_tcp_mtu_probing = 0;
int sysctl_tcp_base_mss = 512;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle = 1;

static void update_send_head(struct sock *sk, struct tcp_sock *tp,
			     struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}
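
/* Worked example of the RFC2861 restart logic below, with hypothetical
 * numbers: assume snd_cwnd is 40, tcp_init_cwnd() returns 4, and the
 * connection has been idle a bit more than three RTOs.  The while loop
 * halves cwnd once per elapsed RTO, 40 -> 20 -> 10 -> 5, and snd_cwnd
 * becomes max(5, 4) = 5, so it never drops below the restart window.
 */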

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value enough for senders following
	 * RFC2414. Senders not following this RFC will be satisfied
	 * with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
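
/* Worked example for tcp_select_initial_window() above, with hypothetical
 * numbers: with a receive buffer limit (tcp_rmem[2] or rmem_max) of 4 MB
 * and no tighter window clamp, the loop shifts 4194304 right until it
 * fits in 16 bits, giving rcv_wscale = 7.  For an MSS of 1460 the
 * RFC2414 clamp then limits the initial rcv_wnd to 4 * 1460 = 5840 bytes.
 */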

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if(new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if(sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if(sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->rtt_sample)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if(sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
	skb->h.th = th;
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th->source		= inet->sport;
	th->dest		= inet->dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(tp->rcv_wnd);
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
		th->urg			= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__u32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent);
	} else {
		tcp_build_and_update_options((__u32 *)(th + 1),
					     tp, tcb->when);
		TCP_ECN_send(sk, tp, skb, tcp_header_size);
	}

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	/* NET_XMIT_CN is special. It does not guarantee,
	 * that this packet is lost. It tells that device
	 * is about to start to drop packets or already
	 * drops some packets of the same priority and
	 * invokes us to send less aggressively.
	 */
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}

/* This routine just queues the buffer.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}
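
/* Worked example for the TSO segmenting helper below, with hypothetical
 * numbers: with mss_now == 1448, a 4000-byte skb gets
 * gso_segs = (4000 + 1447) / 1448 = 3 and gso_size = 1448, while a
 * 1000-byte skb (or a socket without GSO support) takes the cheap path
 * and is marked as a single segment.
 */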
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->gso_segs = factor;
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk_charge_skb(sk, buff);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' field of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			/* Adjust Reno SACK estimate. */
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}
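
/* Worked example for tcp_mtu_to_mss() below, with hypothetical numbers:
 * for an IPv4 path MTU of 1500 with no IP options, no extension headers
 * and the timestamp option in use (tcp_header_len == 32), the result is
 * 1500 - 20 - 20 = 1460, minus the 12 bytes of option space counted in
 * tcp_header_len, i.e. an mss of 1448.  tcp_mss_to_mtu() performs the
 * inverse computation.
 */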
/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
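
/* Worked example for tcp_mtup_init() above, with hypothetical numbers:
 * on IPv4 with timestamps (tcp_header_len == 32) and the default
 * tcp_base_mss of 512, search_low starts at an MTU of
 * 512 + 32 + 0 + 20 = 564, while search_high is bounded by the peer's
 * mss_clamp plus the fixed TCP and IP header sizes.
 */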

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
   for TCP options, but includes only the bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function. --ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml,tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}
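
/* Illustrative reading of tcp_nagle_check() above, under hypothetical
 * conditions: a 700-byte segment with mss_now == 1448 is held back
 * either when TCP_CORK is set, or when Nagle is enabled (nonagle == 0)
 * and a previously sent sub-MSS segment (tracked via snd_sml) has not
 * yet been acknowledged; a full-sized segment is never held back here.
 */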

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).  */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk_charge_skb(sk, buff);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
Miller { 10866687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1087c1b4a7e6SDavid S. Miller u32 send_win, cong_win, limit, in_flight; 1088c1b4a7e6SDavid S. Miller 1089c1b4a7e6SDavid S. Miller if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1090c1b4a7e6SDavid S. Miller return 0; 1091c1b4a7e6SDavid S. Miller 10926687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 1093908a75c1SDavid S. Miller return 0; 1094908a75c1SDavid S. Miller 1095c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1096c1b4a7e6SDavid S. Miller 1097c1b4a7e6SDavid S. Miller BUG_ON(tcp_skb_pcount(skb) <= 1 || 1098c1b4a7e6SDavid S. Miller (tp->snd_cwnd <= in_flight)); 1099c1b4a7e6SDavid S. Miller 1100c1b4a7e6SDavid S. Miller send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq; 1101c1b4a7e6SDavid S. Miller 1102c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1103c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1104c1b4a7e6SDavid S. Miller 1105c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1106c1b4a7e6SDavid S. Miller 1107ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 1108ba244fe9SDavid S. Miller if (limit >= 65536) 1109ba244fe9SDavid S. Miller return 0; 1110ba244fe9SDavid S. Miller 1111c1b4a7e6SDavid S. Miller if (sysctl_tcp_tso_win_divisor) { 1112c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1113c1b4a7e6SDavid S. Miller 1114c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1115c1b4a7e6SDavid S. Miller * just use it. 1116c1b4a7e6SDavid S. Miller */ 1117c1b4a7e6SDavid S. Miller chunk /= sysctl_tcp_tso_win_divisor; 1118c1b4a7e6SDavid S. Miller if (limit >= chunk) 1119c1b4a7e6SDavid S. Miller return 0; 1120c1b4a7e6SDavid S. Miller } else { 1121c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1122c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1123c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 1124c1b4a7e6SDavid S. Miller * then send now. 1125c1b4a7e6SDavid S. Miller */ 1126c1b4a7e6SDavid S. Miller if (limit > tcp_max_burst(tp) * tp->mss_cache) 1127c1b4a7e6SDavid S. Miller return 0; 1128c1b4a7e6SDavid S. Miller } 1129c1b4a7e6SDavid S. Miller 1130c1b4a7e6SDavid S. Miller /* Ok, it looks like it is advisable to defer. */ 1131c1b4a7e6SDavid S. Miller return 1; 1132c1b4a7e6SDavid S. Miller } 1133c1b4a7e6SDavid S. Miller 11345d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 
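 * The probe coalesces data that is already queued into one segment of
 * roughly twice the current MSS (with an illustrative mss_cache of
 * 1460 bytes that is about 2920 bytes of payload), so we only try when
 * at least that much data is queued, the receive window covers it and
 * the congestion window is comfortably large.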
11355d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available), 11365d424d5aSJohn Heffner * 1 if a probe was sent, 11375d424d5aSJohn Heffner * -1 otherwise */ 11385d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk) 11395d424d5aSJohn Heffner { 11405d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 11415d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 11425d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next; 11435d424d5aSJohn Heffner int len; 11445d424d5aSJohn Heffner int probe_size; 11455d424d5aSJohn Heffner unsigned int pif; 11465d424d5aSJohn Heffner int copy; 11475d424d5aSJohn Heffner int mss_now; 11485d424d5aSJohn Heffner 11495d424d5aSJohn Heffner /* Not currently probing/verifying, 11505d424d5aSJohn Heffner * not in recovery, 11515d424d5aSJohn Heffner * have enough cwnd, and 11525d424d5aSJohn Heffner * not SACKing (the variable headers throw things off) */ 11535d424d5aSJohn Heffner if (!icsk->icsk_mtup.enabled || 11545d424d5aSJohn Heffner icsk->icsk_mtup.probe_size || 11555d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 11565d424d5aSJohn Heffner tp->snd_cwnd < 11 || 11575d424d5aSJohn Heffner tp->rx_opt.eff_sacks) 11585d424d5aSJohn Heffner return -1; 11595d424d5aSJohn Heffner 11605d424d5aSJohn Heffner /* Very simple search strategy: just double the MSS. */ 11615d424d5aSJohn Heffner mss_now = tcp_current_mss(sk, 0); 11625d424d5aSJohn Heffner probe_size = 2*tp->mss_cache; 11635d424d5aSJohn Heffner if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 11645d424d5aSJohn Heffner /* TODO: set timer for probe_converge_event */ 11655d424d5aSJohn Heffner return -1; 11665d424d5aSJohn Heffner } 11675d424d5aSJohn Heffner 11685d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */ 11695d424d5aSJohn Heffner len = 0; 11705d424d5aSJohn Heffner if ((skb = sk->sk_send_head) == NULL) 11715d424d5aSJohn Heffner return -1; 11725d424d5aSJohn Heffner while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb)) 11735d424d5aSJohn Heffner skb = skb->next; 11745d424d5aSJohn Heffner if (len < probe_size) 11755d424d5aSJohn Heffner return -1; 11765d424d5aSJohn Heffner 11775d424d5aSJohn Heffner /* Receive window check. */ 11785d424d5aSJohn Heffner if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) { 11795d424d5aSJohn Heffner if (tp->snd_wnd < probe_size) 11805d424d5aSJohn Heffner return -1; 11815d424d5aSJohn Heffner else 11825d424d5aSJohn Heffner return 0; 11835d424d5aSJohn Heffner } 11845d424d5aSJohn Heffner 11855d424d5aSJohn Heffner /* Do we need to wait to drain cwnd? */ 11865d424d5aSJohn Heffner pif = tcp_packets_in_flight(tp); 11875d424d5aSJohn Heffner if (pif + 2 > tp->snd_cwnd) { 11885d424d5aSJohn Heffner /* With no packets in flight, don't stall. */ 11895d424d5aSJohn Heffner if (pif == 0) 11905d424d5aSJohn Heffner return -1; 11915d424d5aSJohn Heffner else 11925d424d5aSJohn Heffner return 0; 11935d424d5aSJohn Heffner } 11945d424d5aSJohn Heffner 11955d424d5aSJohn Heffner /* We're allowed to probe. Build it now. 
*/ 11965d424d5aSJohn Heffner if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 11975d424d5aSJohn Heffner return -1; 11985d424d5aSJohn Heffner sk_charge_skb(sk, nskb); 11995d424d5aSJohn Heffner 12005d424d5aSJohn Heffner skb = sk->sk_send_head; 12015d424d5aSJohn Heffner __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue); 12025d424d5aSJohn Heffner sk->sk_send_head = nskb; 12035d424d5aSJohn Heffner 12045d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 12055d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 12065d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; 12075d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 12085d424d5aSJohn Heffner nskb->csum = 0; 1209*84fa7933SPatrick McHardy nskb->ip_summed = skb->ip_summed; 12105d424d5aSJohn Heffner 12115d424d5aSJohn Heffner len = 0; 12125d424d5aSJohn Heffner while (len < probe_size) { 12135d424d5aSJohn Heffner next = skb->next; 12145d424d5aSJohn Heffner 12155d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 12165d424d5aSJohn Heffner if (nskb->ip_summed) 12175d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 12185d424d5aSJohn Heffner else 12195d424d5aSJohn Heffner nskb->csum = skb_copy_and_csum_bits(skb, 0, 12205d424d5aSJohn Heffner skb_put(nskb, copy), copy, nskb->csum); 12215d424d5aSJohn Heffner 12225d424d5aSJohn Heffner if (skb->len <= copy) { 12235d424d5aSJohn Heffner /* We've eaten all the data from this skb. 12245d424d5aSJohn Heffner * Throw it away. */ 12255d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; 12265d424d5aSJohn Heffner __skb_unlink(skb, &sk->sk_write_queue); 12275d424d5aSJohn Heffner sk_stream_free_skb(sk, skb); 12285d424d5aSJohn Heffner } else { 12295d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 12305d424d5aSJohn Heffner ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 12315d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 12325d424d5aSJohn Heffner skb_pull(skb, copy); 1233*84fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 12345d424d5aSJohn Heffner skb->csum = csum_partial(skb->data, skb->len, 0); 12355d424d5aSJohn Heffner } else { 12365d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 12375d424d5aSJohn Heffner tcp_set_skb_tso_segs(sk, skb, mss_now); 12385d424d5aSJohn Heffner } 12395d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 12405d424d5aSJohn Heffner } 12415d424d5aSJohn Heffner 12425d424d5aSJohn Heffner len += copy; 12435d424d5aSJohn Heffner skb = next; 12445d424d5aSJohn Heffner } 12455d424d5aSJohn Heffner tcp_init_tso_segs(sk, nskb, nskb->len); 12465d424d5aSJohn Heffner 12475d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 12485d424d5aSJohn Heffner * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 12495d424d5aSJohn Heffner TCP_SKB_CB(nskb)->when = tcp_time_stamp; 12505d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 12515d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 12525d424d5aSJohn Heffner * effectively two packets. 
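 * The probe covers roughly two MSS worth of sequence space but is
 * accounted as a single segment in flight, so cwnd is reduced by one
 * to compensate and leave the load offered to the path unchanged.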
*/ 12535d424d5aSJohn Heffner tp->snd_cwnd--; 12545d424d5aSJohn Heffner update_send_head(sk, tp, nskb); 12555d424d5aSJohn Heffner 12565d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 12570e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 12580e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 12595d424d5aSJohn Heffner 12605d424d5aSJohn Heffner return 1; 12615d424d5aSJohn Heffner } 12625d424d5aSJohn Heffner 12635d424d5aSJohn Heffner return -1; 12645d424d5aSJohn Heffner } 12655d424d5aSJohn Heffner 12665d424d5aSJohn Heffner 12671da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 12681da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote 12691da177e4SLinus Torvalds * window for us. 12701da177e4SLinus Torvalds * 12711da177e4SLinus Torvalds * Returns 1, if no segments are in flight and we have queued segments, but 12721da177e4SLinus Torvalds * cannot send anything now because of SWS or another problem. 12731da177e4SLinus Torvalds */ 1274a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) 12751da177e4SLinus Torvalds { 12761da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 127792df7b51SDavid S. Miller struct sk_buff *skb; 1278c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 1279c1b4a7e6SDavid S. Miller int cwnd_quota; 12805d424d5aSJohn Heffner int result; 12811da177e4SLinus Torvalds 12821da177e4SLinus Torvalds /* If we are closed, the bytes will have to remain here. 12831da177e4SLinus Torvalds * In time closedown will finish, we empty the write queue and all 12841da177e4SLinus Torvalds * will be happy. 12851da177e4SLinus Torvalds */ 128692df7b51SDavid S. Miller if (unlikely(sk->sk_state == TCP_CLOSE)) 128792df7b51SDavid S. Miller return 0; 128892df7b51SDavid S. Miller 1289c1b4a7e6SDavid S. Miller sent_pkts = 0; 12905d424d5aSJohn Heffner 12915d424d5aSJohn Heffner /* Do MTU probing. */ 12925d424d5aSJohn Heffner if ((result = tcp_mtu_probe(sk)) == 0) { 12935d424d5aSJohn Heffner return 0; 12945d424d5aSJohn Heffner } else if (result > 0) { 12955d424d5aSJohn Heffner sent_pkts = 1; 12965d424d5aSJohn Heffner } 12975d424d5aSJohn Heffner 1298b68e9f85SHerbert Xu while ((skb = sk->sk_send_head)) { 1299c8ac3774SHerbert Xu unsigned int limit; 1300c8ac3774SHerbert Xu 1301b68e9f85SHerbert Xu tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1302c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1303c1b4a7e6SDavid S. Miller 1304b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 1305b68e9f85SHerbert Xu if (!cwnd_quota) 1306b68e9f85SHerbert Xu break; 1307b68e9f85SHerbert Xu 1308b68e9f85SHerbert Xu if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1309b68e9f85SHerbert Xu break; 1310b68e9f85SHerbert Xu 1311c1b4a7e6SDavid S. Miller if (tso_segs == 1) { 1312aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1313aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1314aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 1315aa93466bSDavid S. Miller break; 1316c1b4a7e6SDavid S. Miller } else { 1317c1b4a7e6SDavid S. Miller if (tcp_tso_should_defer(sk, tp, skb)) 1318aa93466bSDavid S. Miller break; 1319c1b4a7e6SDavid S. Miller } 1320aa93466bSDavid S. Miller 1321c8ac3774SHerbert Xu limit = mss_now; 1322c1b4a7e6SDavid S. Miller if (tso_segs > 1) { 1323c8ac3774SHerbert Xu limit = tcp_window_allows(tp, skb, 1324c1b4a7e6SDavid S. Miller mss_now, cwnd_quota); 1325c1b4a7e6SDavid S. 
Miller 1326c1b4a7e6SDavid S. Miller if (skb->len < limit) { 1327c1b4a7e6SDavid S. Miller unsigned int trim = skb->len % mss_now; 1328c1b4a7e6SDavid S. Miller 1329c1b4a7e6SDavid S. Miller if (trim) 1330c1b4a7e6SDavid S. Miller limit = skb->len - trim; 1331c1b4a7e6SDavid S. Miller } 1332c1b4a7e6SDavid S. Miller } 1333c8ac3774SHerbert Xu 1334c8ac3774SHerbert Xu if (skb->len > limit && 1335c8ac3774SHerbert Xu unlikely(tso_fragment(sk, skb, limit, mss_now))) 13361da177e4SLinus Torvalds break; 13371da177e4SLinus Torvalds 13381da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 1339c1b4a7e6SDavid S. Miller 1340dfb4b9dcSDavid S. Miller if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC))) 13411da177e4SLinus Torvalds break; 13421da177e4SLinus Torvalds 13431da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 13441da177e4SLinus Torvalds * This call will increment packets_out. 13451da177e4SLinus Torvalds */ 13461da177e4SLinus Torvalds update_send_head(sk, tp, skb); 13471da177e4SLinus Torvalds 13481da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 1349aa93466bSDavid S. Miller sent_pkts++; 13501da177e4SLinus Torvalds } 13511da177e4SLinus Torvalds 1352aa93466bSDavid S. Miller if (likely(sent_pkts)) { 13531da177e4SLinus Torvalds tcp_cwnd_validate(sk, tp); 13541da177e4SLinus Torvalds return 0; 13551da177e4SLinus Torvalds } 13561da177e4SLinus Torvalds return !tp->packets_out && sk->sk_send_head; 13571da177e4SLinus Torvalds } 13581da177e4SLinus Torvalds 1359a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 1360a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 1361a762a980SDavid S. Miller * The socket must be locked by the caller. 1362a762a980SDavid S. Miller */ 1363a762a980SDavid S. Miller void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, 1364a2e2a59cSDavid S. Miller unsigned int cur_mss, int nonagle) 1365a762a980SDavid S. Miller { 1366a762a980SDavid S. Miller struct sk_buff *skb = sk->sk_send_head; 1367a762a980SDavid S. Miller 1368a762a980SDavid S. Miller if (skb) { 136955c97f3eSDavid S. Miller if (tcp_write_xmit(sk, cur_mss, nonagle)) 1370a762a980SDavid S. Miller tcp_check_probe_timer(sk, tp); 1371a762a980SDavid S. Miller } 1372a762a980SDavid S. Miller } 1373a762a980SDavid S. Miller 1374c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 1375c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 1376c1b4a7e6SDavid S. Miller */ 1377c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 1378c1b4a7e6SDavid S. Miller { 1379c1b4a7e6SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk); 1380c1b4a7e6SDavid S. Miller struct sk_buff *skb = sk->sk_send_head; 1381c1b4a7e6SDavid S. Miller unsigned int tso_segs, cwnd_quota; 1382c1b4a7e6SDavid S. Miller 1383c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 1384c1b4a7e6SDavid S. Miller 1385846998aeSDavid S. Miller tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1386c1b4a7e6SDavid S. Miller cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH); 1387c1b4a7e6SDavid S. Miller 1388c1b4a7e6SDavid S. Miller if (likely(cwnd_quota)) { 1389c8ac3774SHerbert Xu unsigned int limit; 1390c8ac3774SHerbert Xu 1391c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1392c1b4a7e6SDavid S. Miller 1393c8ac3774SHerbert Xu limit = mss_now; 1394c1b4a7e6SDavid S. Miller if (tso_segs > 1) { 1395c8ac3774SHerbert Xu limit = tcp_window_allows(tp, skb, 1396c1b4a7e6SDavid S. 
Miller mss_now, cwnd_quota); 1397c1b4a7e6SDavid S. Miller 1398c1b4a7e6SDavid S. Miller if (skb->len < limit) { 1399c1b4a7e6SDavid S. Miller unsigned int trim = skb->len % mss_now; 1400c1b4a7e6SDavid S. Miller 1401c1b4a7e6SDavid S. Miller if (trim) 1402c1b4a7e6SDavid S. Miller limit = skb->len - trim; 1403c1b4a7e6SDavid S. Miller } 1404c1b4a7e6SDavid S. Miller } 1405c8ac3774SHerbert Xu 1406c8ac3774SHerbert Xu if (skb->len > limit && 1407c8ac3774SHerbert Xu unlikely(tso_fragment(sk, skb, limit, mss_now))) 1408c1b4a7e6SDavid S. Miller return; 1409c1b4a7e6SDavid S. Miller 1410c1b4a7e6SDavid S. Miller /* Send it out now. */ 1411c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->when = tcp_time_stamp; 1412c1b4a7e6SDavid S. Miller 1413dfb4b9dcSDavid S. Miller if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { 1414c1b4a7e6SDavid S. Miller update_send_head(sk, tp, skb); 1415c1b4a7e6SDavid S. Miller tcp_cwnd_validate(sk, tp); 1416c1b4a7e6SDavid S. Miller return; 1417c1b4a7e6SDavid S. Miller } 1418c1b4a7e6SDavid S. Miller } 1419c1b4a7e6SDavid S. Miller } 1420c1b4a7e6SDavid S. Miller 14211da177e4SLinus Torvalds /* This function returns the amount that we can raise the 14221da177e4SLinus Torvalds * usable window based on the following constraints 14231da177e4SLinus Torvalds * 14241da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 14251da177e4SLinus Torvalds * 2. We limit memory per socket 14261da177e4SLinus Torvalds * 14271da177e4SLinus Torvalds * RFC 1122: 14281da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 14291da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 14301da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 14311da177e4SLinus Torvalds * 14321da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 14331da177e4SLinus Torvalds * it at least MSS bytes. 14341da177e4SLinus Torvalds * 14351da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 14361da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 14371da177e4SLinus Torvalds * 14381da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 14391da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 14401da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 14411da177e4SLinus Torvalds * window to always advance by a single byte. 14421da177e4SLinus Torvalds * 14431da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 14441da177e4SLinus Torvalds * then this will not be a problem. 14451da177e4SLinus Torvalds * 14461da177e4SLinus Torvalds * BSD seems to make the following compromise: 14471da177e4SLinus Torvalds * 14481da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 14491da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 14501da177e4SLinus Torvalds * then set the window to 0. 14511da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 14521da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 14531da177e4SLinus Torvalds * and from being larger than the largest representable value. 
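 * As a rough standalone model of that compromise (illustrative only;
 * the "never shrink, never exceed the representable maximum" clamping
 * is left out):
 *
 *	static unsigned int bsd_style_window(unsigned int free,
 *					     unsigned int max_space,
 *					     unsigned int mss)
 *	{
 *		if (free < max_space / 4 && free < mss / 2)
 *			return 0;
 *		return free;
 *	}
 *
 * With, say, 16 KB of maximum space and a 1460-byte MSS this zeroes the
 * window only once free space has dropped below 730 bytes.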
14541da177e4SLinus Torvalds * 14551da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 14561da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 14571da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 14581da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 14591da177e4SLinus Torvalds * because the pipeline is full. 14601da177e4SLinus Torvalds * 14611da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 14621da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 14631da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 14641da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 14651da177e4SLinus Torvalds * of having a fixed window size at almost all times. 14661da177e4SLinus Torvalds * 14671da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 14681da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 14691da177e4SLinus Torvalds * 14701da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 14711da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 14721da177e4SLinus Torvalds */ 14731da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 14741da177e4SLinus Torvalds { 1475463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 14761da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1477caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 14781da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 14791da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 14801da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 14811da177e4SLinus Torvalds * fluctuations. --SAW 1998/11/1 14821da177e4SLinus Torvalds */ 1483463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss; 14841da177e4SLinus Torvalds int free_space = tcp_space(sk); 14851da177e4SLinus Torvalds int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 14861da177e4SLinus Torvalds int window; 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds if (mss > full_space) 14891da177e4SLinus Torvalds mss = full_space; 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds if (free_space < full_space/2) { 1492463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0; 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds if (tcp_memory_pressure) 14951da177e4SLinus Torvalds tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); 14961da177e4SLinus Torvalds 14971da177e4SLinus Torvalds if (free_space < mss) 14981da177e4SLinus Torvalds return 0; 14991da177e4SLinus Torvalds } 15001da177e4SLinus Torvalds 15011da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh) 15021da177e4SLinus Torvalds free_space = tp->rcv_ssthresh; 15031da177e4SLinus Torvalds 15041da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the 15051da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway. 
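 * Instead the advertisement is rounded up to a multiple of
 * 1<<rcv_wscale below, so that a small window is never shifted away to
 * zero: with an rcv_wscale of 8, for instance, 200 bytes of free space
 * would otherwise be announced as a zero window and is announced as
 * 256 bytes instead.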
15061da177e4SLinus Torvalds */ 15071da177e4SLinus Torvalds window = tp->rcv_wnd; 15081da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) { 15091da177e4SLinus Torvalds window = free_space; 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away. 15121da177e4SLinus Torvalds * Import case: prevent zero window announcement if 15131da177e4SLinus Torvalds * 1<<rcv_wscale > mss. 15141da177e4SLinus Torvalds */ 15151da177e4SLinus Torvalds if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 15161da177e4SLinus Torvalds window = (((window >> tp->rx_opt.rcv_wscale) + 1) 15171da177e4SLinus Torvalds << tp->rx_opt.rcv_wscale); 15181da177e4SLinus Torvalds } else { 15191da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss. 15201da177e4SLinus Torvalds * Window clamp already applied above. 15211da177e4SLinus Torvalds * If our current window offering is within 1 mss of the 15221da177e4SLinus Torvalds * free space we just keep it. This prevents the divide 15231da177e4SLinus Torvalds * and multiply from happening most of the time. 15241da177e4SLinus Torvalds * We also don't do any window rounding when the free space 15251da177e4SLinus Torvalds * is too small. 15261da177e4SLinus Torvalds */ 15271da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 15281da177e4SLinus Torvalds window = (free_space/mss)*mss; 15291da177e4SLinus Torvalds } 15301da177e4SLinus Torvalds 15311da177e4SLinus Torvalds return window; 15321da177e4SLinus Torvalds } 15331da177e4SLinus Torvalds 15341da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */ 15351da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) 15361da177e4SLinus Torvalds { 15371da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15381da177e4SLinus Torvalds struct sk_buff *next_skb = skb->next; 15391da177e4SLinus Torvalds 15401da177e4SLinus Torvalds /* The first test we must make is that neither of these two 15411da177e4SLinus Torvalds * SKB's are still referenced by someone else. 15421da177e4SLinus Torvalds */ 15431da177e4SLinus Torvalds if (!skb_cloned(skb) && !skb_cloned(next_skb)) { 15441da177e4SLinus Torvalds int skb_size = skb->len, next_skb_size = next_skb->len; 15451da177e4SLinus Torvalds u16 flags = TCP_SKB_CB(skb)->flags; 15461da177e4SLinus Torvalds 15471da177e4SLinus Torvalds /* Also punt if next skb has been SACK'd. */ 15481da177e4SLinus Torvalds if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) 15491da177e4SLinus Torvalds return; 15501da177e4SLinus Torvalds 15511da177e4SLinus Torvalds /* Next skb is out of window. */ 15521da177e4SLinus Torvalds if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd)) 15531da177e4SLinus Torvalds return; 15541da177e4SLinus Torvalds 15551da177e4SLinus Torvalds /* Punt if not enough space exists in the first SKB for 15561da177e4SLinus Torvalds * the data in the second, or the total combined payload 15571da177e4SLinus Torvalds * would exceed the MSS. 
15581da177e4SLinus Torvalds */ 15591da177e4SLinus Torvalds if ((next_skb_size > skb_tailroom(skb)) || 15601da177e4SLinus Torvalds ((skb_size + next_skb_size) > mss_now)) 15611da177e4SLinus Torvalds return; 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds BUG_ON(tcp_skb_pcount(skb) != 1 || 15641da177e4SLinus Torvalds tcp_skb_pcount(next_skb) != 1); 15651da177e4SLinus Torvalds 15666a438bbeSStephen Hemminger /* changing transmit queue under us so clear hints */ 15676a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 15686a438bbeSStephen Hemminger 15691da177e4SLinus Torvalds /* Ok. We will be able to collapse the packet. */ 15708728b834SDavid S. Miller __skb_unlink(next_skb, &sk->sk_write_queue); 15711da177e4SLinus Torvalds 15721da177e4SLinus Torvalds memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); 15731da177e4SLinus Torvalds 1574*84fa7933SPatrick McHardy skb->ip_summed = next_skb->ip_summed; 15751da177e4SLinus Torvalds 1576*84fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 15771da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds /* Update sequence range on original skb. */ 15801da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 15811da177e4SLinus Torvalds 15821da177e4SLinus Torvalds /* Merge over control information. */ 15831da177e4SLinus Torvalds flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */ 15841da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = flags; 15851da177e4SLinus Torvalds 15861da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 15871da177e4SLinus Torvalds * packet counting does not break. 15881da177e4SLinus Torvalds */ 15891da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL); 15901da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS) 15911da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(next_skb); 15921da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) { 15931da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(next_skb); 15941da177e4SLinus Torvalds tp->left_out -= tcp_skb_pcount(next_skb); 15951da177e4SLinus Torvalds } 15961da177e4SLinus Torvalds /* Reno case is special. Sigh... */ 15971da177e4SLinus Torvalds if (!tp->rx_opt.sack_ok && tp->sacked_out) { 15981da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->sacked_out, next_skb); 15991da177e4SLinus Torvalds tp->left_out -= tcp_skb_pcount(next_skb); 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 16021da177e4SLinus Torvalds /* Not quite right: it can be > snd.fack, but 16031da177e4SLinus Torvalds * it is better to underestimate fackets. 16041da177e4SLinus Torvalds */ 16051da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->fackets_out, next_skb); 16061da177e4SLinus Torvalds tcp_packets_out_dec(tp, next_skb); 16071da177e4SLinus Torvalds sk_stream_free_skb(sk, next_skb); 16081da177e4SLinus Torvalds } 16091da177e4SLinus Torvalds } 16101da177e4SLinus Torvalds 16111da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in 16121da177e4SLinus Torvalds * tcp_timer. This is used for path mtu discovery. 16131da177e4SLinus Torvalds * The socket is already locked here. 
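 * When the path MTU shrinks, queued segments that are now larger than
 * the current MSS are marked lost and retransmitted (tcp_retransmit_skb()
 * fragments them to the new size), and we move to the Loss state
 * without the usual cwnd reduction, since nothing was actually dropped
 * by the network.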
16141da177e4SLinus Torvalds */ 16151da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk) 16161da177e4SLinus Torvalds { 16176687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 16181da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 16191da177e4SLinus Torvalds struct sk_buff *skb; 16201da177e4SLinus Torvalds unsigned int mss = tcp_current_mss(sk, 0); 16211da177e4SLinus Torvalds int lost = 0; 16221da177e4SLinus Torvalds 16231da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 16241da177e4SLinus Torvalds if (skb->len > mss && 16251da177e4SLinus Torvalds !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { 16261da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 16271da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 16281da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 16291da177e4SLinus Torvalds } 16301da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) { 16311da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 16321da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 16331da177e4SLinus Torvalds lost = 1; 16341da177e4SLinus Torvalds } 16351da177e4SLinus Torvalds } 16361da177e4SLinus Torvalds } 16371da177e4SLinus Torvalds 16386a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 16396a438bbeSStephen Hemminger 16401da177e4SLinus Torvalds if (!lost) 16411da177e4SLinus Torvalds return; 16421da177e4SLinus Torvalds 16431da177e4SLinus Torvalds tcp_sync_left_out(tp); 16441da177e4SLinus Torvalds 16451da177e4SLinus Torvalds /* Don't muck with the congestion window here. 16461da177e4SLinus Torvalds * Reason is that we do not increase the amount of _data_ 16471da177e4SLinus Torvalds * in the network, but units changed and effective 16481da177e4SLinus Torvalds * cwnd/ssthresh really reduced now. 16491da177e4SLinus Torvalds */ 16506687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss) { 16511da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 16526687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = tcp_current_ssthresh(sk); 16531da177e4SLinus Torvalds tp->prior_ssthresh = 0; 16541da177e4SLinus Torvalds tp->undo_marker = 0; 16556687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 16561da177e4SLinus Torvalds } 16571da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 16581da177e4SLinus Torvalds } 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue 16611da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an 16621da177e4SLinus Torvalds * error occurred which prevented the send. 16631da177e4SLinus Torvalds */ 16641da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 16651da177e4SLinus Torvalds { 16661da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 16675d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 16681da177e4SLinus Torvalds unsigned int cur_mss = tcp_current_mss(sk, 0); 16691da177e4SLinus Torvalds int err; 16701da177e4SLinus Torvalds 16715d424d5aSJohn Heffner /* Inconclusive MTU probe */ 16725d424d5aSJohn Heffner if (icsk->icsk_mtup.probe_size) { 16735d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 16745d424d5aSJohn Heffner } 16755d424d5aSJohn Heffner 16761da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible 1677caa20d9aSStephen Hemminger * copying overhead: fragmentation, tunneling, mangling etc.
16781da177e4SLinus Torvalds */ 16791da177e4SLinus Torvalds if (atomic_read(&sk->sk_wmem_alloc) > 16801da177e4SLinus Torvalds min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 16811da177e4SLinus Torvalds return -EAGAIN; 16821da177e4SLinus Torvalds 16831da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 16841da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 16851da177e4SLinus Torvalds BUG(); 16861da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 16871da177e4SLinus Torvalds return -ENOMEM; 16881da177e4SLinus Torvalds } 16891da177e4SLinus Torvalds 16901da177e4SLinus Torvalds /* If receiver has shrunk his window, and skb is out of 16911da177e4SLinus Torvalds * new window, do not retransmit it. The exception is the 16921da177e4SLinus Torvalds * case, when window is shrunk to zero. In this case 16931da177e4SLinus Torvalds * our retransmit serves as a zero window probe. 16941da177e4SLinus Torvalds */ 16951da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd) 16961da177e4SLinus Torvalds && TCP_SKB_CB(skb)->seq != tp->snd_una) 16971da177e4SLinus Torvalds return -EAGAIN; 16981da177e4SLinus Torvalds 16991da177e4SLinus Torvalds if (skb->len > cur_mss) { 1700846998aeSDavid S. Miller if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 17011da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */ 17021da177e4SLinus Torvalds } 17031da177e4SLinus Torvalds 17041da177e4SLinus Torvalds /* Collapse two adjacent packets if worthwhile and we can. */ 17051da177e4SLinus Torvalds if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 17061da177e4SLinus Torvalds (skb->len < (cur_mss >> 1)) && 17071da177e4SLinus Torvalds (skb->next != sk->sk_send_head) && 17081da177e4SLinus Torvalds (skb->next != (struct sk_buff *)&sk->sk_write_queue) && 17091da177e4SLinus Torvalds (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) && 17101da177e4SLinus Torvalds (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) && 17111da177e4SLinus Torvalds (sysctl_tcp_retrans_collapse != 0)) 17121da177e4SLinus Torvalds tcp_retrans_try_collapse(sk, skb, cur_mss); 17131da177e4SLinus Torvalds 17148292a17aSArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 17151da177e4SLinus Torvalds return -EHOSTUNREACH; /* Routing failure or similar. */ 17161da177e4SLinus Torvalds 17171da177e4SLinus Torvalds /* Some Solaris stacks overoptimize and ignore the FIN on a 17181da177e4SLinus Torvalds * retransmit when old data is attached. So strip it off 17191da177e4SLinus Torvalds * since it is cheap to do so and saves bytes on the network. 
17201da177e4SLinus Torvalds */ 17211da177e4SLinus Torvalds if(skb->len > 0 && 17221da177e4SLinus Torvalds (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 17231da177e4SLinus Torvalds tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 17241da177e4SLinus Torvalds if (!pskb_trim(skb, 0)) { 17251da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 17267967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 17277967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 17287967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 17291da177e4SLinus Torvalds skb->ip_summed = CHECKSUM_NONE; 17301da177e4SLinus Torvalds skb->csum = 0; 17311da177e4SLinus Torvalds } 17321da177e4SLinus Torvalds } 17331da177e4SLinus Torvalds 17341da177e4SLinus Torvalds /* Make a copy, if the first transmission SKB clone we made 17351da177e4SLinus Torvalds * is still in somebody's hands, else make a clone. 17361da177e4SLinus Torvalds */ 17371da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 17381da177e4SLinus Torvalds 1739dfb4b9dcSDavid S. Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 17401da177e4SLinus Torvalds 17411da177e4SLinus Torvalds if (err == 0) { 17421da177e4SLinus Torvalds /* Update global TCP statistics. */ 17431da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_RETRANSSEGS); 17441da177e4SLinus Torvalds 17451da177e4SLinus Torvalds tp->total_retrans++; 17461da177e4SLinus Torvalds 17471da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 17481da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 17491da177e4SLinus Torvalds if (net_ratelimit()) 17501da177e4SLinus Torvalds printk(KERN_DEBUG "retrans_out leaked.\n"); 17511da177e4SLinus Torvalds } 17521da177e4SLinus Torvalds #endif 17531da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 17541da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 17551da177e4SLinus Torvalds 17561da177e4SLinus Torvalds /* Save stamp of the first retransmit. */ 17571da177e4SLinus Torvalds if (!tp->retrans_stamp) 17581da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(skb)->when; 17591da177e4SLinus Torvalds 17601da177e4SLinus Torvalds tp->undo_retrans++; 17611da177e4SLinus Torvalds 17621da177e4SLinus Torvalds /* snd_nxt is stored to detect loss of retransmitted segment, 17631da177e4SLinus Torvalds * see tcp_input.c tcp_sacktag_write_queue(). 17641da177e4SLinus Torvalds */ 17651da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 17661da177e4SLinus Torvalds } 17671da177e4SLinus Torvalds return err; 17681da177e4SLinus Torvalds } 17691da177e4SLinus Torvalds 17701da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially 17711da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue 17721da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either 17731da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached. 17741da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout 17751da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again. 17761da177e4SLinus Torvalds * If so, we use it to avoid unnecessarily retransmissions. 
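 * The queue is walked in two passes: segments already marked lost are
 * retransmitted first (resuming from retransmit_skb_hint), and then,
 * when we are in Recovery with SACK and have nothing new to send,
 * not-yet-SACKed segments may be retransmitted in the forward
 * direction as the congestion window allows.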
17771da177e4SLinus Torvalds */ 17781da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk) 17791da177e4SLinus Torvalds { 17806687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 17811da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17821da177e4SLinus Torvalds struct sk_buff *skb; 17836a438bbeSStephen Hemminger int packet_cnt; 17846a438bbeSStephen Hemminger 17856a438bbeSStephen Hemminger if (tp->retransmit_skb_hint) { 17866a438bbeSStephen Hemminger skb = tp->retransmit_skb_hint; 17876a438bbeSStephen Hemminger packet_cnt = tp->retransmit_cnt_hint; 17886a438bbeSStephen Hemminger }else{ 17896a438bbeSStephen Hemminger skb = sk->sk_write_queue.next; 17906a438bbeSStephen Hemminger packet_cnt = 0; 17916a438bbeSStephen Hemminger } 17921da177e4SLinus Torvalds 17931da177e4SLinus Torvalds /* First pass: retransmit lost packets. */ 17946a438bbeSStephen Hemminger if (tp->lost_out) { 17956a438bbeSStephen Hemminger sk_stream_for_retrans_queue_from(skb, sk) { 17961da177e4SLinus Torvalds __u8 sacked = TCP_SKB_CB(skb)->sacked; 17971da177e4SLinus Torvalds 17986a438bbeSStephen Hemminger /* we could do better than to assign each time */ 17996a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb; 18006a438bbeSStephen Hemminger tp->retransmit_cnt_hint = packet_cnt; 18016a438bbeSStephen Hemminger 18021da177e4SLinus Torvalds /* Assume this retransmit will generate 18031da177e4SLinus Torvalds * only one packet for congestion window 18041da177e4SLinus Torvalds * calculation purposes. This works because 18051da177e4SLinus Torvalds * tcp_retransmit_skb() will chop up the 18061da177e4SLinus Torvalds * packet to be MSS sized and all the 18071da177e4SLinus Torvalds * packet counting works out. 18081da177e4SLinus Torvalds */ 18091da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 18101da177e4SLinus Torvalds return; 18111da177e4SLinus Torvalds 18121da177e4SLinus Torvalds if (sacked & TCPCB_LOST) { 18131da177e4SLinus Torvalds if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 18146a438bbeSStephen Hemminger if (tcp_retransmit_skb(sk, skb)) { 18156a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 18161da177e4SLinus Torvalds return; 18176a438bbeSStephen Hemminger } 18186687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss) 18191da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 18201da177e4SLinus Torvalds else 18211da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 18221da177e4SLinus Torvalds 18231da177e4SLinus Torvalds if (skb == 18241da177e4SLinus Torvalds skb_peek(&sk->sk_write_queue)) 1825463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 18263f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 18273f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 18281da177e4SLinus Torvalds } 18291da177e4SLinus Torvalds 18306a438bbeSStephen Hemminger packet_cnt += tcp_skb_pcount(skb); 18316a438bbeSStephen Hemminger if (packet_cnt >= tp->lost_out) 18321da177e4SLinus Torvalds break; 18331da177e4SLinus Torvalds } 18341da177e4SLinus Torvalds } 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds 18371da177e4SLinus Torvalds /* OK, demanded retransmission is finished. */ 18381da177e4SLinus Torvalds 18391da177e4SLinus Torvalds /* Forward retransmissions are possible only during Recovery. 
*/ 18406687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Recovery) 18411da177e4SLinus Torvalds return; 18421da177e4SLinus Torvalds 18431da177e4SLinus Torvalds /* No forward retransmissions in Reno are possible. */ 18441da177e4SLinus Torvalds if (!tp->rx_opt.sack_ok) 18451da177e4SLinus Torvalds return; 18461da177e4SLinus Torvalds 18471da177e4SLinus Torvalds /* Yeah, we have to make difficult choice between forward transmission 18481da177e4SLinus Torvalds * and retransmission... Both ways have their merits... 18491da177e4SLinus Torvalds * 18501da177e4SLinus Torvalds * For now we do not retransmit anything, while we have some new 18511da177e4SLinus Torvalds * segments to send. 18521da177e4SLinus Torvalds */ 18531da177e4SLinus Torvalds 18541da177e4SLinus Torvalds if (tcp_may_send_now(sk, tp)) 18551da177e4SLinus Torvalds return; 18561da177e4SLinus Torvalds 18576a438bbeSStephen Hemminger if (tp->forward_skb_hint) { 18586a438bbeSStephen Hemminger skb = tp->forward_skb_hint; 18596a438bbeSStephen Hemminger packet_cnt = tp->forward_cnt_hint; 18606a438bbeSStephen Hemminger } else{ 18616a438bbeSStephen Hemminger skb = sk->sk_write_queue.next; 18621da177e4SLinus Torvalds packet_cnt = 0; 18636a438bbeSStephen Hemminger } 18641da177e4SLinus Torvalds 18656a438bbeSStephen Hemminger sk_stream_for_retrans_queue_from(skb, sk) { 18666a438bbeSStephen Hemminger tp->forward_cnt_hint = packet_cnt; 18676a438bbeSStephen Hemminger tp->forward_skb_hint = skb; 18686a438bbeSStephen Hemminger 18691da177e4SLinus Torvalds /* Similar to the retransmit loop above we 18701da177e4SLinus Torvalds * can pretend that the retransmitted SKB 18711da177e4SLinus Torvalds * we send out here will be composed of one 18721da177e4SLinus Torvalds * real MSS sized packet because tcp_retransmit_skb() 18731da177e4SLinus Torvalds * will fragment it if necessary. 18741da177e4SLinus Torvalds */ 18751da177e4SLinus Torvalds if (++packet_cnt > tp->fackets_out) 18761da177e4SLinus Torvalds break; 18771da177e4SLinus Torvalds 18781da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 18791da177e4SLinus Torvalds break; 18801da177e4SLinus Torvalds 18811da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 18821da177e4SLinus Torvalds continue; 18831da177e4SLinus Torvalds 18841da177e4SLinus Torvalds /* Ok, retransmit it. */ 18856a438bbeSStephen Hemminger if (tcp_retransmit_skb(sk, skb)) { 18866a438bbeSStephen Hemminger tp->forward_skb_hint = NULL; 18871da177e4SLinus Torvalds break; 18886a438bbeSStephen Hemminger } 18891da177e4SLinus Torvalds 18901da177e4SLinus Torvalds if (skb == skb_peek(&sk->sk_write_queue)) 18913f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 18923f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 18933f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 18941da177e4SLinus Torvalds 18951da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 18961da177e4SLinus Torvalds } 18971da177e4SLinus Torvalds } 18981da177e4SLinus Torvalds 18991da177e4SLinus Torvalds 19001da177e4SLinus Torvalds /* Send a fin. The caller locks the socket for us. This cannot be 19011da177e4SLinus Torvalds * allowed to fail queueing a FIN frame under any circumstances. 
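 * If unsent data is still queued, the FIN flag is simply folded onto
 * the last pending segment; otherwise a fresh segment carrying only
 * the FIN is allocated, retrying (with yield()) until memory is
 * available.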
19021da177e4SLinus Torvalds */ 19031da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 19041da177e4SLinus Torvalds { 19051da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19061da177e4SLinus Torvalds struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); 19071da177e4SLinus Torvalds int mss_now; 19081da177e4SLinus Torvalds 19091da177e4SLinus Torvalds /* Optimization, tack on the FIN if we have a queue of 19101da177e4SLinus Torvalds * unsent frames. But be careful about outgoing SACKS 19111da177e4SLinus Torvalds * and IP options. 19121da177e4SLinus Torvalds */ 19131da177e4SLinus Torvalds mss_now = tcp_current_mss(sk, 1); 19141da177e4SLinus Torvalds 19151da177e4SLinus Torvalds if (sk->sk_send_head != NULL) { 19161da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 19171da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq++; 19181da177e4SLinus Torvalds tp->write_seq++; 19191da177e4SLinus Torvalds } else { 19201da177e4SLinus Torvalds /* Socket is locked, keep trying until memory is available. */ 19211da177e4SLinus Torvalds for (;;) { 1922d179cd12SDavid S. Miller skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 19231da177e4SLinus Torvalds if (skb) 19241da177e4SLinus Torvalds break; 19251da177e4SLinus Torvalds yield(); 19261da177e4SLinus Torvalds } 19271da177e4SLinus Torvalds 19281da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 19291da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 19301da177e4SLinus Torvalds skb->csum = 0; 19311da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 19321da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 19337967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 19347967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 19357967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 19361da177e4SLinus Torvalds 19371da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 19381da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = tp->write_seq; 19391da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 19401da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 19411da177e4SLinus Torvalds } 19421da177e4SLinus Torvalds __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); 19431da177e4SLinus Torvalds } 19441da177e4SLinus Torvalds 19451da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 19461da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 19471da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 19481da177e4SLinus Torvalds * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM 19491da177e4SLinus Torvalds */ 1950dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 19511da177e4SLinus Torvalds { 19521da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19531da177e4SLinus Torvalds struct sk_buff *skb; 19541da177e4SLinus Torvalds 19551da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 19561da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 19571da177e4SLinus Torvalds if (!skb) { 19581da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 19591da177e4SLinus Torvalds return; 19601da177e4SLinus Torvalds } 19611da177e4SLinus Torvalds 19621da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. 
*/ 19631da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 19641da177e4SLinus Torvalds skb->csum = 0; 19651da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 19661da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 19677967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 19687967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 19697967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 19701da177e4SLinus Torvalds 19711da177e4SLinus Torvalds /* Send it off. */ 19721da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); 19731da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 19741da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 1975dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 19761da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 19771da177e4SLinus Torvalds } 19781da177e4SLinus Torvalds 19791da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent 19801da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 19811da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 19821da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 19831da177e4SLinus Torvalds */ 19841da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 19851da177e4SLinus Torvalds { 19861da177e4SLinus Torvalds struct sk_buff* skb; 19871da177e4SLinus Torvalds 19881da177e4SLinus Torvalds skb = skb_peek(&sk->sk_write_queue); 19891da177e4SLinus Torvalds if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { 19901da177e4SLinus Torvalds printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 19911da177e4SLinus Torvalds return -EFAULT; 19921da177e4SLinus Torvalds } 19931da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { 19941da177e4SLinus Torvalds if (skb_cloned(skb)) { 19951da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 19961da177e4SLinus Torvalds if (nskb == NULL) 19971da177e4SLinus Torvalds return -ENOMEM; 19981da177e4SLinus Torvalds __skb_unlink(skb, &sk->sk_write_queue); 19991da177e4SLinus Torvalds skb_header_release(nskb); 20001da177e4SLinus Torvalds __skb_queue_head(&sk->sk_write_queue, nskb); 20011da177e4SLinus Torvalds sk_stream_free_skb(sk, skb); 20021da177e4SLinus Torvalds sk_charge_skb(sk, nskb); 20031da177e4SLinus Torvalds skb = nskb; 20041da177e4SLinus Torvalds } 20051da177e4SLinus Torvalds 20061da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 20071da177e4SLinus Torvalds TCP_ECN_send_synack(tcp_sk(sk), skb); 20081da177e4SLinus Torvalds } 20091da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2010dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 20111da177e4SLinus Torvalds } 20121da177e4SLinus Torvalds 20131da177e4SLinus Torvalds /* 20141da177e4SLinus Torvalds * Prepare a SYN-ACK. 
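 * The reply is built from the request_sock and the supplied route:
 * header space is reserved for the MSS option plus, when the peer
 * negotiated them, timestamp, window scale and SACK-permitted options,
 * and the initial receive window is computed only for the first
 * transmission (retransmitted SYN-ACKs reuse it).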
20151da177e4SLinus Torvalds */ 20161da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 201760236fddSArnaldo Carvalho de Melo struct request_sock *req) 20181da177e4SLinus Torvalds { 20192e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 20201da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 20211da177e4SLinus Torvalds struct tcphdr *th; 20221da177e4SLinus Torvalds int tcp_header_size; 20231da177e4SLinus Torvalds struct sk_buff *skb; 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 20261da177e4SLinus Torvalds if (skb == NULL) 20271da177e4SLinus Torvalds return NULL; 20281da177e4SLinus Torvalds 20291da177e4SLinus Torvalds /* Reserve space for headers. */ 20301da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 20311da177e4SLinus Torvalds 20321da177e4SLinus Torvalds skb->dst = dst_clone(dst); 20331da177e4SLinus Torvalds 20341da177e4SLinus Torvalds tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + 20352e6599cbSArnaldo Carvalho de Melo (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + 20362e6599cbSArnaldo Carvalho de Melo (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + 20371da177e4SLinus Torvalds /* SACK_PERM is in the place of NOP NOP of TS */ 20382e6599cbSArnaldo Carvalho de Melo ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); 20391da177e4SLinus Torvalds skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); 20401da177e4SLinus Torvalds 20411da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 20421da177e4SLinus Torvalds th->syn = 1; 20431da177e4SLinus Torvalds th->ack = 1; 20441da177e4SLinus Torvalds TCP_ECN_make_synack(req, th); 20451da177e4SLinus Torvalds th->source = inet_sk(sk)->sport; 20462e6599cbSArnaldo Carvalho de Melo th->dest = ireq->rmt_port; 20472e6599cbSArnaldo Carvalho de Melo TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 20481da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 20491da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 20507967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 20517967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 20527967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 20531da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq); 20542e6599cbSArnaldo Carvalho de Melo th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 20551da177e4SLinus Torvalds if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 20561da177e4SLinus Torvalds __u8 rcv_wscale; 20571da177e4SLinus Torvalds /* Set this up on the first call only */ 20581da177e4SLinus Torvalds req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 20591da177e4SLinus Torvalds /* tcp_full_space because it is guaranteed to be the first packet */ 20601da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 20612e6599cbSArnaldo Carvalho de Melo dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 20621da177e4SLinus Torvalds &req->rcv_wnd, 20631da177e4SLinus Torvalds &req->window_clamp, 20642e6599cbSArnaldo Carvalho de Melo ireq->wscale_ok, 20651da177e4SLinus Torvalds &rcv_wscale); 20662e6599cbSArnaldo Carvalho de Melo ireq->rcv_wscale = rcv_wscale; 20671da177e4SLinus Torvalds } 20681da177e4SLinus Torvalds 20691da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
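 * The raw 16-bit value therefore goes into the header untouched; the
 * peer applies no scale factor to a window carried in a SYN.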
*/ 20701da177e4SLinus Torvalds th->window = htons(req->rcv_wnd); 20711da177e4SLinus Torvalds 20721da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 20732e6599cbSArnaldo Carvalho de Melo tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 20742e6599cbSArnaldo Carvalho de Melo ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 20751da177e4SLinus Torvalds TCP_SKB_CB(skb)->when, 20761da177e4SLinus Torvalds req->ts_recent); 20771da177e4SLinus Torvalds 20781da177e4SLinus Torvalds skb->csum = 0; 20791da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 20801da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_OUTSEGS); 20811da177e4SLinus Torvalds return skb; 20821da177e4SLinus Torvalds } 20831da177e4SLinus Torvalds 20841da177e4SLinus Torvalds /* 20851da177e4SLinus Torvalds * Do all connect socket setups that can be done AF independent. 20861da177e4SLinus Torvalds */ 208740efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk) 20881da177e4SLinus Torvalds { 20891da177e4SLinus Torvalds struct dst_entry *dst = __sk_dst_get(sk); 20901da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 20911da177e4SLinus Torvalds __u8 rcv_wscale; 20921da177e4SLinus Torvalds 20931da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end. 20941da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 20951da177e4SLinus Torvalds */ 20961da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr) + 20971da177e4SLinus Torvalds (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 20981da177e4SLinus Torvalds 20991da177e4SLinus Torvalds /* If user gave his TCP_MAXSEG, record it to clamp */ 21001da177e4SLinus Torvalds if (tp->rx_opt.user_mss) 21011da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 21021da177e4SLinus Torvalds tp->max_window = 0; 21035d424d5aSJohn Heffner tcp_mtup_init(sk); 21041da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst)); 21051da177e4SLinus Torvalds 21061da177e4SLinus Torvalds if (!tp->window_clamp) 21071da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 21081da177e4SLinus Torvalds tp->advmss = dst_metric(dst, RTAX_ADVMSS); 21091da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 21101da177e4SLinus Torvalds 21111da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 21121da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
21111da177e4SLinus Torvalds     tcp_select_initial_window(tcp_full_space(sk),
21121da177e4SLinus Torvalds         tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
21131da177e4SLinus Torvalds         &tp->rcv_wnd,
21141da177e4SLinus Torvalds         &tp->window_clamp,
21151da177e4SLinus Torvalds         sysctl_tcp_window_scaling,
21161da177e4SLinus Torvalds         &rcv_wscale);
21171da177e4SLinus Torvalds 
21181da177e4SLinus Torvalds     tp->rx_opt.rcv_wscale = rcv_wscale;
21191da177e4SLinus Torvalds     tp->rcv_ssthresh = tp->rcv_wnd;
21201da177e4SLinus Torvalds 
21211da177e4SLinus Torvalds     sk->sk_err = 0;
21221da177e4SLinus Torvalds     sock_reset_flag(sk, SOCK_DONE);
21231da177e4SLinus Torvalds     tp->snd_wnd = 0;
21241da177e4SLinus Torvalds     tcp_init_wl(tp, tp->write_seq, 0);
21251da177e4SLinus Torvalds     tp->snd_una = tp->write_seq;
21261da177e4SLinus Torvalds     tp->snd_sml = tp->write_seq;
21271da177e4SLinus Torvalds     tp->rcv_nxt = 0;
21281da177e4SLinus Torvalds     tp->rcv_wup = 0;
21291da177e4SLinus Torvalds     tp->copied_seq = 0;
21301da177e4SLinus Torvalds 
2131463c84b9SArnaldo Carvalho de Melo     inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2132463c84b9SArnaldo Carvalho de Melo     inet_csk(sk)->icsk_retransmits = 0;
21331da177e4SLinus Torvalds     tcp_clear_retrans(tp);
21341da177e4SLinus Torvalds }
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds /*
21371da177e4SLinus Torvalds  * Build a SYN and send it off.
21381da177e4SLinus Torvalds  */
21391da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
21401da177e4SLinus Torvalds {
21411da177e4SLinus Torvalds     struct tcp_sock *tp = tcp_sk(sk);
21421da177e4SLinus Torvalds     struct sk_buff *buff;
21431da177e4SLinus Torvalds 
21441da177e4SLinus Torvalds     tcp_connect_init(sk);
21451da177e4SLinus Torvalds 
2146d179cd12SDavid S. Miller     buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
21471da177e4SLinus Torvalds     if (unlikely(buff == NULL))
21481da177e4SLinus Torvalds         return -ENOBUFS;
21491da177e4SLinus Torvalds 
21501da177e4SLinus Torvalds     /* Reserve space for headers. */
21511da177e4SLinus Torvalds     skb_reserve(buff, MAX_TCP_HEADER);
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds     TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
21541da177e4SLinus Torvalds     TCP_ECN_send_syn(sk, tp, buff);
21551da177e4SLinus Torvalds     TCP_SKB_CB(buff)->sacked = 0;
21567967168cSHerbert Xu     skb_shinfo(buff)->gso_segs = 1;
21577967168cSHerbert Xu     skb_shinfo(buff)->gso_size = 0;
21587967168cSHerbert Xu     skb_shinfo(buff)->gso_type = 0;
21591da177e4SLinus Torvalds     buff->csum = 0;
2160bd37a088SWei Yongjun     tp->snd_nxt = tp->write_seq;
21611da177e4SLinus Torvalds     TCP_SKB_CB(buff)->seq = tp->write_seq++;
21621da177e4SLinus Torvalds     TCP_SKB_CB(buff)->end_seq = tp->write_seq;
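    /* Editor's note (added): the SYN consumes one unit of sequence space,
     * which is why write_seq is post-incremented above: end_seq == seq + 1,
     * and snd_nxt is moved past it once the segment has been transmitted.
     */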
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds     /* Send it off. */
21651da177e4SLinus Torvalds     TCP_SKB_CB(buff)->when = tcp_time_stamp;
21661da177e4SLinus Torvalds     tp->retrans_stamp = TCP_SKB_CB(buff)->when;
21671da177e4SLinus Torvalds     skb_header_release(buff);
21681da177e4SLinus Torvalds     __skb_queue_tail(&sk->sk_write_queue, buff);
21691da177e4SLinus Torvalds     sk_charge_skb(sk, buff);
21701da177e4SLinus Torvalds     tp->packets_out += tcp_skb_pcount(buff);
2171dfb4b9dcSDavid S. Miller     tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2172bd37a088SWei Yongjun 
2173bd37a088SWei Yongjun     /* We change tp->snd_nxt after the tcp_transmit_skb() call
2174bd37a088SWei Yongjun      * in order to make this packet get counted in tcpOutSegs.
2175bd37a088SWei Yongjun      */
2176bd37a088SWei Yongjun     tp->snd_nxt = tp->write_seq;
2177bd37a088SWei Yongjun     tp->pushed_seq = tp->write_seq;
21781da177e4SLinus Torvalds     TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds     /* Timer for repeating the SYN until an answer. */
21813f421baaSArnaldo Carvalho de Melo     inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
21823f421baaSArnaldo Carvalho de Melo                               inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
21831da177e4SLinus Torvalds     return 0;
21841da177e4SLinus Torvalds }
21851da177e4SLinus Torvalds 
21861da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
21871da177e4SLinus Torvalds  * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
21881da177e4SLinus Torvalds  * for details.
21891da177e4SLinus Torvalds  */
21901da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
21911da177e4SLinus Torvalds {
2192463c84b9SArnaldo Carvalho de Melo     struct inet_connection_sock *icsk = inet_csk(sk);
2193463c84b9SArnaldo Carvalho de Melo     int ato = icsk->icsk_ack.ato;
21941da177e4SLinus Torvalds     unsigned long timeout;
21951da177e4SLinus Torvalds 
21961da177e4SLinus Torvalds     if (ato > TCP_DELACK_MIN) {
2197463c84b9SArnaldo Carvalho de Melo         const struct tcp_sock *tp = tcp_sk(sk);
21981da177e4SLinus Torvalds         int max_ato = HZ/2;
21991da177e4SLinus Torvalds 
2200463c84b9SArnaldo Carvalho de Melo         if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
22011da177e4SLinus Torvalds             max_ato = TCP_DELACK_MAX;
22021da177e4SLinus Torvalds 
22031da177e4SLinus Torvalds         /* Slow path, intersegment interval is "high". */
22041da177e4SLinus Torvalds 
22051da177e4SLinus Torvalds         /* If some rtt estimate is known, use it to bound delayed ack.
2206463c84b9SArnaldo Carvalho de Melo          * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
22071da177e4SLinus Torvalds          * directly.
22081da177e4SLinus Torvalds          */
22091da177e4SLinus Torvalds         if (tp->srtt) {
22101da177e4SLinus Torvalds             int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);
22111da177e4SLinus Torvalds 
22121da177e4SLinus Torvalds             if (rtt < max_ato)
22131da177e4SLinus Torvalds                 max_ato = rtt;
22141da177e4SLinus Torvalds         }
22151da177e4SLinus Torvalds 
22161da177e4SLinus Torvalds         ato = min(ato, max_ato);
22171da177e4SLinus Torvalds     }
22181da177e4SLinus Torvalds 
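    /* Editor's note (added, illustrative numbers): with HZ == 1000 and
     * tp->srtt >> 3 == 40 jiffies (40ms), max_ato above becomes 40ms, so an
     * ato that the quick-ack heuristics had pushed up to 200ms is clamped
     * back to 40ms before the delayed-ACK timer is armed below.
     */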
22191da177e4SLinus Torvalds     /* Stay within the limit we were given */
22201da177e4SLinus Torvalds     timeout = jiffies + ato;
22211da177e4SLinus Torvalds 
22221da177e4SLinus Torvalds     /* Use new timeout only if there wasn't an older one earlier. */
2223463c84b9SArnaldo Carvalho de Melo     if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
22241da177e4SLinus Torvalds         /* If delack timer was blocked or is about to expire,
22251da177e4SLinus Torvalds          * send ACK now.
22261da177e4SLinus Torvalds          */
2227463c84b9SArnaldo Carvalho de Melo         if (icsk->icsk_ack.blocked ||
2228463c84b9SArnaldo Carvalho de Melo             time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
22291da177e4SLinus Torvalds             tcp_send_ack(sk);
22301da177e4SLinus Torvalds             return;
22311da177e4SLinus Torvalds         }
22321da177e4SLinus Torvalds 
2233463c84b9SArnaldo Carvalho de Melo         if (!time_before(timeout, icsk->icsk_ack.timeout))
2234463c84b9SArnaldo Carvalho de Melo             timeout = icsk->icsk_ack.timeout;
22351da177e4SLinus Torvalds     }
2236463c84b9SArnaldo Carvalho de Melo     icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2237463c84b9SArnaldo Carvalho de Melo     icsk->icsk_ack.timeout = timeout;
2238463c84b9SArnaldo Carvalho de Melo     sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
22391da177e4SLinus Torvalds }
22401da177e4SLinus Torvalds 
22411da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
22421da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
22431da177e4SLinus Torvalds {
22441da177e4SLinus Torvalds     /* If we have been reset, we may not send again. */
22451da177e4SLinus Torvalds     if (sk->sk_state != TCP_CLOSE) {
22461da177e4SLinus Torvalds         struct tcp_sock *tp = tcp_sk(sk);
22471da177e4SLinus Torvalds         struct sk_buff *buff;
22481da177e4SLinus Torvalds 
22491da177e4SLinus Torvalds         /* We are not putting this on the write queue, so
22501da177e4SLinus Torvalds          * tcp_transmit_skb() will set the ownership to this
22511da177e4SLinus Torvalds          * sock.
22521da177e4SLinus Torvalds          */
22531da177e4SLinus Torvalds         buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
22541da177e4SLinus Torvalds         if (buff == NULL) {
2255463c84b9SArnaldo Carvalho de Melo             inet_csk_schedule_ack(sk);
2256463c84b9SArnaldo Carvalho de Melo             inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
22573f421baaSArnaldo Carvalho de Melo             inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
22583f421baaSArnaldo Carvalho de Melo                                       TCP_DELACK_MAX, TCP_RTO_MAX);
22591da177e4SLinus Torvalds             return;
22601da177e4SLinus Torvalds         }
22611da177e4SLinus Torvalds 
22621da177e4SLinus Torvalds         /* Reserve space for headers and prepare control bits. */
22631da177e4SLinus Torvalds         skb_reserve(buff, MAX_TCP_HEADER);
22641da177e4SLinus Torvalds         buff->csum = 0;
22651da177e4SLinus Torvalds         TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
22661da177e4SLinus Torvalds         TCP_SKB_CB(buff)->sacked = 0;
22677967168cSHerbert Xu         skb_shinfo(buff)->gso_segs = 1;
22687967168cSHerbert Xu         skb_shinfo(buff)->gso_size = 0;
22697967168cSHerbert Xu         skb_shinfo(buff)->gso_type = 0;
22701da177e4SLinus Torvalds 
22711da177e4SLinus Torvalds         /* Send it off, this clears delayed acks for us. */
22721da177e4SLinus Torvalds         TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
22731da177e4SLinus Torvalds         TCP_SKB_CB(buff)->when = tcp_time_stamp;
2274dfb4b9dcSDavid S. Miller         tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
22751da177e4SLinus Torvalds     }
22761da177e4SLinus Torvalds }
22771da177e4SLinus Torvalds 
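/* Editor's note (added): the two probe flavours described below are driven
 * from tcp_write_wakeup(): tcp_xmit_probe_skb(sk, TCPCB_URG) carries the
 * urgent pointer at SND.UNA, while tcp_xmit_probe_skb(sk, 0) sends the
 * out-of-date SND.UNA-1 segment that actually probes the peer's window.
 */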
22781da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence
22791da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
22801da177e4SLinus Torvalds  *
22811da177e4SLinus Torvalds  * Question: what should we do while in urgent mode?
22821da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
22831da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
22841da177e4SLinus Torvalds  *
22851da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
22861da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
22871da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
22881da177e4SLinus Torvalds  */
22891da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
22901da177e4SLinus Torvalds {
22911da177e4SLinus Torvalds     struct tcp_sock *tp = tcp_sk(sk);
22921da177e4SLinus Torvalds     struct sk_buff *skb;
22931da177e4SLinus Torvalds 
22941da177e4SLinus Torvalds     /* We don't queue it, tcp_transmit_skb() sets ownership. */
22951da177e4SLinus Torvalds     skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
22961da177e4SLinus Torvalds     if (skb == NULL)
22971da177e4SLinus Torvalds         return -1;
22981da177e4SLinus Torvalds 
22991da177e4SLinus Torvalds     /* Reserve space for headers and set control bits. */
23001da177e4SLinus Torvalds     skb_reserve(skb, MAX_TCP_HEADER);
23011da177e4SLinus Torvalds     skb->csum = 0;
23021da177e4SLinus Torvalds     TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
23031da177e4SLinus Torvalds     TCP_SKB_CB(skb)->sacked = urgent;
23047967168cSHerbert Xu     skb_shinfo(skb)->gso_segs = 1;
23057967168cSHerbert Xu     skb_shinfo(skb)->gso_size = 0;
23067967168cSHerbert Xu     skb_shinfo(skb)->gso_type = 0;
23071da177e4SLinus Torvalds 
23081da177e4SLinus Torvalds     /* Use a previous sequence.  This should cause the other
23091da177e4SLinus Torvalds      * end to send an ack.  Don't queue or clone SKB, just
23101da177e4SLinus Torvalds      * send it.
23111da177e4SLinus Torvalds      */
23121da177e4SLinus Torvalds     TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
23131da177e4SLinus Torvalds     TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
23141da177e4SLinus Torvalds     TCP_SKB_CB(skb)->when = tcp_time_stamp;
2315dfb4b9dcSDavid S. Miller     return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
23161da177e4SLinus Torvalds }
23171da177e4SLinus Torvalds 
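/* Editor's note (added): re-sending SND.UNA-1 above uses a sequence number
 * the receiver has already acknowledged; the segment is discarded as a
 * duplicate but still elicits an ACK carrying the receiver's current window,
 * which is all a zero-window probe needs.
 */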
23181da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
23191da177e4SLinus Torvalds {
23201da177e4SLinus Torvalds     if (sk->sk_state != TCP_CLOSE) {
23211da177e4SLinus Torvalds         struct tcp_sock *tp = tcp_sk(sk);
23221da177e4SLinus Torvalds         struct sk_buff *skb;
23231da177e4SLinus Torvalds 
23241da177e4SLinus Torvalds         if ((skb = sk->sk_send_head) != NULL &&
23251da177e4SLinus Torvalds             before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
23261da177e4SLinus Torvalds             int err;
23271da177e4SLinus Torvalds             unsigned int mss = tcp_current_mss(sk, 0);
23281da177e4SLinus Torvalds             unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;
23291da177e4SLinus Torvalds 
23301da177e4SLinus Torvalds             if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
23311da177e4SLinus Torvalds                 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
23321da177e4SLinus Torvalds 
23331da177e4SLinus Torvalds             /* We are probing the opening of a window
23341da177e4SLinus Torvalds              * but the window size is != 0,
23351da177e4SLinus Torvalds              * which must have been a result of SWS avoidance (sender side).
23361da177e4SLinus Torvalds              */
23371da177e4SLinus Torvalds             if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
23381da177e4SLinus Torvalds                 skb->len > mss) {
23391da177e4SLinus Torvalds                 seg_size = min(seg_size, mss);
23401da177e4SLinus Torvalds                 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
2341846998aeSDavid S. Miller                 if (tcp_fragment(sk, skb, seg_size, mss))
23421da177e4SLinus Torvalds                     return -1;
23431da177e4SLinus Torvalds             } else if (!tcp_skb_pcount(skb))
2344846998aeSDavid S. Miller                 tcp_set_skb_tso_segs(sk, skb, mss);
23451da177e4SLinus Torvalds 
23461da177e4SLinus Torvalds             TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
23471da177e4SLinus Torvalds             TCP_SKB_CB(skb)->when = tcp_time_stamp;
2348dfb4b9dcSDavid S. Miller             err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
23491da177e4SLinus Torvalds             if (!err) {
23501da177e4SLinus Torvalds                 update_send_head(sk, tp, skb);
23511da177e4SLinus Torvalds             }
23521da177e4SLinus Torvalds             return err;
23531da177e4SLinus Torvalds         } else {
23541da177e4SLinus Torvalds             if (tp->urg_mode &&
23551da177e4SLinus Torvalds                 between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
23561da177e4SLinus Torvalds                 tcp_xmit_probe_skb(sk, TCPCB_URG);
23571da177e4SLinus Torvalds             return tcp_xmit_probe_skb(sk, 0);
23581da177e4SLinus Torvalds         }
23591da177e4SLinus Torvalds     }
23601da177e4SLinus Torvalds     return -1;
23611da177e4SLinus Torvalds }
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds /* A window probe timeout has occurred.  If the window is not closed, send
23641da177e4SLinus Torvalds  * a partial packet, else a zero-window probe.
23651da177e4SLinus Torvalds  */
23661da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
23671da177e4SLinus Torvalds {
2368463c84b9SArnaldo Carvalho de Melo     struct inet_connection_sock *icsk = inet_csk(sk);
23691da177e4SLinus Torvalds     struct tcp_sock *tp = tcp_sk(sk);
23701da177e4SLinus Torvalds     int err;
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds     err = tcp_write_wakeup(sk);
23731da177e4SLinus Torvalds 
23741da177e4SLinus Torvalds     if (tp->packets_out || !sk->sk_send_head) {
23751da177e4SLinus Torvalds         /* Cancel probe timer, if it is not required. */
23766687e988SArnaldo Carvalho de Melo         icsk->icsk_probes_out = 0;
2377463c84b9SArnaldo Carvalho de Melo         icsk->icsk_backoff = 0;
23781da177e4SLinus Torvalds         return;
23791da177e4SLinus Torvalds     }
23801da177e4SLinus Torvalds 
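    /* Editor's note (added, illustrative numbers): in the err <= 0 branch
     * below the probe timer backs off exponentially as
     * min(icsk_rto << icsk_backoff, TCP_RTO_MAX); e.g. an RTO of 200ms with a
     * backoff of 6 re-arms the timer at 12.8s, well under the 120s cap.
     */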
23811da177e4SLinus Torvalds     if (err <= 0) {
2382463c84b9SArnaldo Carvalho de Melo         if (icsk->icsk_backoff < sysctl_tcp_retries2)
2383463c84b9SArnaldo Carvalho de Melo             icsk->icsk_backoff++;
23846687e988SArnaldo Carvalho de Melo         icsk->icsk_probes_out++;
2385463c84b9SArnaldo Carvalho de Melo         inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
23863f421baaSArnaldo Carvalho de Melo                                   min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
23873f421baaSArnaldo Carvalho de Melo                                   TCP_RTO_MAX);
23881da177e4SLinus Torvalds     } else {
23891da177e4SLinus Torvalds         /* If packet was not sent due to local congestion,
23906687e988SArnaldo Carvalho de Melo          * do not backoff and do not remember icsk_probes_out.
23911da177e4SLinus Torvalds          * Let local senders fight for local resources.
23921da177e4SLinus Torvalds          *
23931da177e4SLinus Torvalds          * Still use the accumulated backoff, though.
23941da177e4SLinus Torvalds          */
23956687e988SArnaldo Carvalho de Melo         if (!icsk->icsk_probes_out)
23966687e988SArnaldo Carvalho de Melo             icsk->icsk_probes_out = 1;
2397463c84b9SArnaldo Carvalho de Melo         inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2398463c84b9SArnaldo Carvalho de Melo                                   min(icsk->icsk_rto << icsk->icsk_backoff,
23993f421baaSArnaldo Carvalho de Melo                                       TCP_RESOURCE_PROBE_INTERVAL),
24003f421baaSArnaldo Carvalho de Melo                                   TCP_RTO_MAX);
24011da177e4SLinus Torvalds     }
24021da177e4SLinus Torvalds }
24031da177e4SLinus Torvalds 
24041da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect);
24051da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack);
24061da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit);
24071da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss);
2408f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
24095d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init);
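/* Editor's note (added): the exports above are for modular users of the TCP
 * output path; e.g. the IPv6 TCP code calls tcp_make_synack() and
 * tcp_sync_mss() when it is built as a module.
 */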