/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller	:	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul	:	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
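/* For reference: the knobs above are exported under /proc/sys/net/ipv4/
 * as tcp_retrans_collapse, tcp_workaround_signed_windows,
 * tcp_tso_win_divisor, tcp_mtu_probing, tcp_base_mss and
 * tcp_slow_start_after_idle; the sysctl table entries themselves live
 * in net/ipv4/sysctl_net_ipv4.c.
 */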
static inline void tcp_packets_out_inc(struct sock *sk,
                                       const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int orig = tp->packets_out;

        tp->packets_out += tcp_skb_pcount(skb);
        if (!orig)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static void update_send_head(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
        tcp_packets_out_inc(sk, skb);

        /* Don't override Nagle indefinitely with F-RTO */
        if (tp->frto_counter == 2)
                tp->frto_counter = 3;
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!before(tp->snd_una + tp->snd_wnd, tp->snd_nxt))
                return tp->snd_nxt;
        else
                return tp->snd_una + tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        int mss = tp->advmss;

        if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
                mss = dst_metric(dst, RTAX_ADVMSS);
                tp->advmss = mss;
        }

        return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window".  This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;

        tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

        tp->snd_ssthresh = tcp_current_ssthresh(sk);
        restart_cwnd = min(restart_cwnd, cwnd);

        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_cwnd_used = 0;
}
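/* Worked example of the halving loop above (illustrative numbers): with
 * icsk_rto = 200ms, an idle gap of 700ms (3.5 * RTO), snd_cwnd = 32 and
 * a restart window of 4, delta stays positive through three subtractions
 * of the RTO, so cwnd decays 32 -> 16 -> 8 -> 4 and snd_cwnd is reset to
 * max(4, 4) = 4.
 */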
static void tcp_event_data_sent(struct tcp_sock *tp,
                                struct sk_buff *skb, struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;

        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
                tcp_cwnd_restart(sk, __sk_dst_get(sk));

        tp->lsndtime = now;

        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
        if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
                icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
                               __u32 *rcv_wnd, __u32 *window_clamp,
                               int wscale_ok, __u8 *rcv_wscale)
{
        unsigned int space = (__space < 0 ? 0 : __space);

        /* If no clamp is set, set the clamp to the max possible scaled window */
        if (*window_clamp == 0)
                (*window_clamp) = (65535 << 14);
        space = min(*window_clamp, space);

        /* Quantize space offering to a multiple of mss if possible. */
        if (space > mss)
                space = (space / mss) * mss;

        /* NOTE: offering an initial window larger than 32767
         * will break some buggy TCP stacks. If the admin tells us
         * it is likely we could be speaking with such a buggy stack
         * we will truncate our initial window offering to 32K-1
         * unless the remote has sent us a window scaling option,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
        if (sysctl_tcp_workaround_signed_windows)
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = space;

        (*rcv_wscale) = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window.
                 * See RFC1323 for an explanation of the limit to 14.
                 */
                space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
                        (*rcv_wscale)++;
                }
        }

        /* Set the initial window to a value large enough for senders
         * following RFC2414.  Senders not following this RFC will be
         * satisfied with 2.
         */
        if (mss > (1 << *rcv_wscale)) {
                int init_cwnd = 4;
                if (mss > 1460 * 3)
                        init_cwnd = 2;
                else if (mss > 1460)
                        init_cwnd = 3;
                if (*rcv_wnd > init_cwnd * mss)
                        *rcv_wnd = init_cwnd * mss;
        }

        /* Set the clamp no higher than max representable value */
        (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
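/* Worked example for the function above (illustrative numbers): with
 * mss = 1460, __space = 131072 and *window_clamp = 0, the clamp becomes
 * 65535 << 14 and space is quantized to (131072 / 1460) * 1460 = 129940,
 * offered in full as rcv_wnd when the signed-window workaround is off.
 * Assuming max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max) = 131072, the
 * shift loop runs twice, giving rcv_wscale = 2.  Since mss <= 1460 picks
 * init_cwnd = 4, rcv_wnd is then reduced to 4 * 1460 = 5840 and the
 * clamp to 65535 << 2 = 262140.
 */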
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 cur_win = tcp_receive_window(tp);
        u32 new_win = __tcp_select_window(sk);

        /* Never shrink the offered window */
        if (new_win < cur_win) {
                /* Danger Will Robinson!
                 * Don't update rcv_wup/rcv_wnd here or else
                 * we will not be able to advertise a zero
                 * window in time.  --DaveM
                 *
                 * Relax Will Robinson.
                 */
                new_win = cur_win;
        }
        tp->rcv_wnd = new_win;
        tp->rcv_wup = tp->rcv_nxt;

        /* Make sure we do not exceed the maximum possible
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

        /* RFC1323 scaling applied */
        new_win >>= tp->rx_opt.rcv_wscale;

        /* If we advertise a zero window, disable the fast path. */
        if (new_win == 0)
                tp->pred_flags = 0;

        return new_win;
}
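/* Scaling example for the function above: with rcv_wscale = 2 the
 * largest representable window is 65535 << 2 = 262140, so a computed
 * window of 262144 is first clamped to 262140 and then sent on the
 * wire as 262140 >> 2 = 65535.
 */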
static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
                                       struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->ecn_flags = 0;
        if (sysctl_tcp_ecn) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
                tp->ecn_flags = TCP_ECN_OK;
        }
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
        if (inet_rsk(req)->ecn_ok)
                th->ece = 1;
}

static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
                                int tcp_header_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->ecn_flags & TCP_ECN_OK) {
                /* Not-retransmitted data segment: set ECT and inject CWR. */
                if (skb->len != tcp_header_len &&
                    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
                                tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
                        /* ACK or retransmitted segment: clear ECT|CE */
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
                        tcp_hdr(skb)->ece = 1;
        }
}
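/* ECN negotiation at a glance (RFC 3168): the active opener sets ECE|CWR
 * in its SYN (TCP_ECN_send_syn above), the passive side answers with ECE
 * only (TCP_ECN_send_synack/TCP_ECN_make_synack), and once TCP_ECN_OK is
 * set, fresh data segments go out ECT-marked via INET_ECN_xmit(); the
 * receiver keeps echoing ECE while TCP_ECN_DEMAND_CWR is pending, and the
 * sender answers a queued cwnd reduction with a single CWR bit.
 */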
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                                         __u32 tstamp, __u8 **md5_hash)
{
        if (tp->rx_opt.tstamp_ok) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_TIMESTAMP << 8) |
                               TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);
                *ptr++ = htonl(tp->rx_opt.ts_recent);
        }
        if (tp->rx_opt.eff_sacks) {
                struct tcp_sack_block *sp = tp->rx_opt.dsack ?
                        tp->duplicate_sack : tp->selective_acks;
                int this_sack;

                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK << 8) |
                               (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
                                                     TCPOLEN_SACK_PERBLOCK)));

                for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }

                if (tp->rx_opt.dsack) {
                        tp->rx_opt.dsack = 0;
                        tp->rx_opt.eff_sacks--;
                }
        }
#ifdef CONFIG_TCP_MD5SIG
        if (md5_hash) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                *md5_hash = (__u8 *)ptr;
        }
#endif
}
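/* On-the-wire layout produced above for a timestamped segment: one
 * 32-bit word NOP|NOP|TIMESTAMP|10 followed by TSVAL and TSECR, i.e.
 * TCPOLEN_TSTAMP_ALIGNED = 12 bytes.  A segment carrying one SACK block
 * additionally emits NOP|NOP|SACK|10 plus two sequence numbers, i.e.
 * TCPOLEN_SACK_BASE_ALIGNED + TCPOLEN_SACK_PERBLOCK = 4 + 8 = 12 bytes.
 */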
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 *
 * Note that with the RFC2385 TCP option, we make room for the
 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
 * location to be filled is passed back up.
 */
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
                                  int offer_wscale, int wscale, __u32 tstamp,
                                  __u32 ts_recent, __u8 **md5_hash)
{
        /* We always get an MSS option.
         * The option bytes which will be seen in normal data
         * packets should timestamps be used, must be in the MSS
         * advertised.  But we subtract them from tp->mss_cache so
         * that calculations in tcp_sendmsg are simpler etc.
         * So account for this fact here if necessary.  If we
         * don't do this correctly, as a receiver we won't
         * recognize data packets as being full sized when we
         * should, and thus we won't abide by the delayed ACK
         * rules correctly.
         * SACKs don't matter, we never delay an ACK when we
         * have any of those going out.
         */
        *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
        if (ts) {
                if (sack)
                        *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
                                       (TCPOLEN_SACK_PERM << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                else
                        *ptr++ = htonl((TCPOPT_NOP << 24) |
                                       (TCPOPT_NOP << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);         /* TSVAL */
                *ptr++ = htonl(ts_recent);      /* TSECR */
        } else if (sack)
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK_PERM << 8) |
                               TCPOLEN_SACK_PERM);
        if (offer_wscale)
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_WINDOW << 16) |
                               (TCPOLEN_WINDOW << 8) |
                               (wscale));
#ifdef CONFIG_TCP_MD5SIG
        /*
         * If MD5 is enabled, then we set the option, and include the size
         * (always 18).  The actual MD5 hash is added just before the
         * packet is sent.
         */
        if (md5_hash) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                *md5_hash = (__u8 *)ptr;
        }
#endif
}
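/* Example SYN option block built above with timestamps, SACK and window
 * scaling all enabled: MSS (4 bytes), SACK_PERM|TIMESTAMP packed into one
 * word plus TSVAL/TSECR (12 bytes), and NOP|WINDOW|3|wscale (4 bytes):
 * 20 bytes of options in total, for a 40 byte TCP header.
 */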
/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                            gfp_t gfp_mask)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
        struct tcp_sock *tp;
        struct tcp_skb_cb *tcb;
        int tcp_header_size;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *md5;
        __u8 *md5_hash_location;
#endif
        struct tcphdr *th;
        int sysctl_flags;
        int err;

        BUG_ON(!skb || !tcp_skb_pcount(skb));

        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
        if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);

        if (likely(clone_it)) {
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
                        skb = skb_clone(skb, gfp_mask);
                if (unlikely(!skb))
                        return -ENOBUFS;
        }

        inet = inet_sk(sk);
        tp = tcp_sk(sk);
        tcb = TCP_SKB_CB(skb);
        tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS     0x1
#define SYSCTL_FLAG_WSCALE      0x2
#define SYSCTL_FLAG_SACK        0x4

        sysctl_flags = 0;
        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
                if (sysctl_tcp_timestamps) {
                        tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
                        sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
                }
                if (sysctl_tcp_window_scaling) {
                        tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
                        sysctl_flags |= SYSCTL_FLAG_WSCALE;
                }
                if (sysctl_tcp_sack) {
                        sysctl_flags |= SYSCTL_FLAG_SACK;
                        if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
                                tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
                }
        } else if (unlikely(tp->rx_opt.eff_sacks)) {
                /* A SACK is 2 pad bytes, a 2 byte header, plus
                 * 2 32-bit sequence numbers for each SACK block.
                 */
                tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
                                    (tp->rx_opt.eff_sacks *
                                     TCPOLEN_SACK_PERBLOCK));
        }

        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);

#ifdef CONFIG_TCP_MD5SIG
        /*
         * Are we doing MD5 on this segment? If so - make
         * room for it.
         */
        md5 = tp->af_specific->md5_lookup(sk, sk);
        if (md5)
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif

        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
        skb_set_owner_w(skb, sk);

        /* Build TCP header and checksum it. */
        th = tcp_hdr(skb);
        th->source              = inet->sport;
        th->dest                = inet->dport;
        th->seq                 = htonl(tcb->seq);
        th->ack_seq             = htonl(tp->rcv_nxt);
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->flags);

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                /* RFC1323: The window in SYN & SYN/ACK segments
                 * is never scaled.
                 */
                th->window      = htons(min(tp->rcv_wnd, 65535U));
        } else {
                th->window      = htons(tcp_select_window(sk));
        }
        th->check               = 0;
        th->urg_ptr             = 0;

        if (unlikely(tp->urg_mode &&
                     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
                th->urg_ptr             = htons(tp->snd_up - tcb->seq);
                th->urg                 = 1;
        }

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                tcp_syn_build_options((__be32 *)(th + 1),
                                      tcp_advertise_mss(sk),
                                      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
                                      (sysctl_flags & SYSCTL_FLAG_SACK),
                                      (sysctl_flags & SYSCTL_FLAG_WSCALE),
                                      tp->rx_opt.rcv_wscale,
                                      tcb->when,
                                      tp->rx_opt.ts_recent,
#ifdef CONFIG_TCP_MD5SIG
                                      md5 ? &md5_hash_location :
#endif
                                      NULL);
        } else {
                tcp_build_and_update_options((__be32 *)(th + 1),
                                             tp, tcb->when,
#ifdef CONFIG_TCP_MD5SIG
                                             md5 ? &md5_hash_location :
#endif
                                             NULL);
                TCP_ECN_send(sk, skb, tcp_header_size);
        }

#ifdef CONFIG_TCP_MD5SIG
        /* Calculate the MD5 hash, as we have all we need now */
        if (md5) {
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               sk, NULL, NULL,
                                               tcp_hdr(skb),
                                               sk->sk_protocol,
                                               skb->len);
        }
#endif

        icsk->icsk_af_ops->send_check(sk, skb->len, skb);

        if (likely(tcb->flags & TCPCB_FLAG_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, skb, sk);

        if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
                TCP_INC_STATS(TCP_MIB_OUTSEGS);

        err = icsk->icsk_af_ops->queue_xmit(skb, 0);
        if (likely(err <= 0))
                return err;

        tcp_enter_cwr(sk, 1);

        return net_xmit_eval(err);

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}
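/* The halfword write in tcp_transmit_skb() packs the data offset and the
 * flags into bytes 12-13 of the TCP header: for a 32 byte header on a
 * pure ACK, ((32 >> 2) << 12) | TCPCB_FLAG_ACK = 0x8000 | 0x10 = 0x8010.
 */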
/* This routine just queues the buffer.
 *
 * NOTE: the probe0 timer is not checked, do not forget
 * tcp_push_pending_frames, otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
}
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
{
        if (skb->len <= mss_now || !sk_can_gso(sk)) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
                skb_shinfo(skb)->gso_segs = 1;
                skb_shinfo(skb)->gso_size = 0;
                skb_shinfo(skb)->gso_type = 0;
        } else {
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
}
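/* Example: a 4380 byte payload with mss_now = 1460 on a GSO-capable
 * socket gets gso_segs = DIV_ROUND_UP(4380, 1460) = 3 and
 * gso_size = 1460; a 512 byte skb keeps gso_segs = 1, gso_size = 0.
 */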
/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
                                   int decr)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->sacked_out || tcp_is_reno(tp))
                return;

        if (!before(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
                tp->fackets_out -= decr;
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
                 unsigned int mss_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
        int nlen;
        u16 flags;

        BUG_ON(len > skb->len);

        tcp_clear_retrans_hints_partial(tp);
        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;

        if (skb_cloned(skb) &&
            skb_is_nonlinear(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
        if (buff == NULL)
                return -ENOMEM; /* We'll just try again later. */

        sk_charge_skb(sk, buff);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        if (tcp_is_sack(tp) && tp->sacked_out && (skb == tp->highest_sack))
                tp->highest_sack = buff;
        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
        TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
                buff->csum = csum_partial_copy_nocheck(skb->data + len,
                                                       skb_put(buff, nsize),
                                                       nsize, 0);

                skb_trim(skb, len);

                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
        } else {
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb_split(skb, buff, len);
        }

        buff->ip_summed = skb->ip_summed;

        /* Looks stupid, but our code really uses the 'when' of skbs
         * which it never sent before. --ANK
         */
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;

        old_factor = tcp_skb_pcount(skb);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* If this packet has been sent out already, we must
         * adjust the various packet counters.
         */
        if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
                int diff = old_factor - tcp_skb_pcount(skb) -
                        tcp_skb_pcount(buff);

                tp->packets_out -= diff;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                        tp->sacked_out -= diff;
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= diff;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        tp->lost_out -= diff;

                /* Adjust Reno SACK estimate. */
                if (tcp_is_reno(tp) && diff > 0) {
                        tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
                        tcp_verify_left_out(tp);
                }
                tcp_adjust_fackets_out(sk, skb, diff);
        }

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}
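/* Sequence bookkeeping example for tcp_fragment() (illustrative
 * numbers): splitting an skb covering [1000, 6840) at len = 2920 leaves
 * the original with [1000, 3920) and the new buff with [3920, 6840).
 * With mss_now = 1460 both halves are re-marked as two-segment TSO
 * frames, so old_factor (4) minus the new pcounts (2 + 2) gives
 * diff = 0 and no counter adjustment is needed.
 */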
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually).  The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
        int i, k, eat;

        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
        if (skb_cloned(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* If len == headlen, we avoid __skb_pull to preserve alignment. */
        if (unlikely(len < skb_headlen(skb)))
                __skb_pull(skb, len);
        else
                __pskb_trim_head(skb, len - skb_headlen(skb));

        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb->truesize -= len;
        sk->sk_wmem_queued -= len;
        sk->sk_forward_alloc += len;
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

        /* Any change of skb->len requires recalculation of tso
         * factor and mss.
         */
        if (tcp_skb_pcount(skb) > 1)
                tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

        return 0;
}
/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        /* Calculate base mss without TCP options:
         * It is MMS_S - sizeof(tcphdr) of rfc1122.
         */
        mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

        /* Clamp it (mss_clamp does not include tcp options) */
        if (mss_now > tp->rx_opt.mss_clamp)
                mss_now = tp->rx_opt.mss_clamp;

        /* Now subtract optional transport overhead */
        mss_now -= icsk->icsk_ext_hdr_len;

        /* Then reserve room for full set of TCP options and 8 bytes of data */
        if (mss_now < 48)
                mss_now = 48;

        /* Now subtract TCP options size, not including SACKs */
        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

        return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mtu;

        mtu = mss +
              tp->tcp_header_len +
              icsk->icsk_ext_hdr_len +
              icsk->icsk_af_ops->net_header_len;

        return mtu;
}
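/* Round-trip example (assuming IPv4, no extension headers, timestamps
 * enabled so tcp_header_len = 32, and an mss_clamp that does not bite):
 * tcp_mtu_to_mss() for a 1500 byte path MTU yields
 * 1500 - 20 - 20 - (32 - 20) = 1448, and tcp_mss_to_mtu(sk, 1448) =
 * 1448 + 32 + 0 + 20 = 1500 again.
 */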
void tcp_mtup_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
        icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
                                      icsk->icsk_af_ops->net_header_len;
        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
        icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes snd mss to the current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG.  It does
 * NOT account for TCP options; it includes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
 * It is the minimum of user_mss and the mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including
 * all tcp options except for SACKs.  It is evaluated,
 * taking into account the current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that the advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        if (icsk->icsk_mtup.search_high > pmtu)
                icsk->icsk_mtup.search_high = pmtu;

        mss_now = tcp_mtu_to_mss(sk, pmtu);

        /* Bound mss with half of window */
        if (tp->max_window && mss_now > (tp->max_window >> 1))
                mss_now = max((tp->max_window >> 1), 68U - tp->tcp_header_len);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        if (icsk->icsk_mtup.enabled)
                mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
        tp->mss_cache = mss_now;

        return mss_now;
}
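/* Probing range example (assuming IPv4, timestamps on, and a peer MSS of
 * 1460 in mss_clamp): tcp_mtup_init() brackets the search with
 * search_high = 1460 + 20 + 20 = 1500 and search_low =
 * tcp_mss_to_mtu(sk, 512) = 512 + 32 + 0 + 20 = 564.  While probing is
 * enabled, tcp_sync_mss() caps its result at
 * tcp_mtu_to_mss(sk, search_low).
 */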
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        u32 mss_now;
        u16 xmit_size_goal;
        int doing_tso = 0;

        mss_now = tp->mss_cache;

        if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
                doing_tso = 1;

        if (dst) {
                u32 mtu = dst_mtu(dst);
                if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
                        mss_now = tcp_sync_mss(sk, mtu);
        }

        if (tp->rx_opt.eff_sacks)
                mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
                            (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

#ifdef CONFIG_TCP_MD5SIG
        if (tp->af_specific->md5_lookup(sk, sk))
                mss_now -= TCPOLEN_MD5SIG_ALIGNED;
#endif

        xmit_size_goal = mss_now;

        if (doing_tso) {
                xmit_size_goal = (65535 -
                                  inet_csk(sk)->icsk_af_ops->net_header_len -
                                  inet_csk(sk)->icsk_ext_hdr_len -
                                  tp->tcp_header_len);

                if (tp->max_window &&
                    (xmit_size_goal > (tp->max_window >> 1)))
                        xmit_size_goal = max((tp->max_window >> 1),
                                             68U - tp->tcp_header_len);

                xmit_size_goal -= (xmit_size_goal % mss_now);
        }
        tp->xmit_size_goal = xmit_size_goal;

        return mss_now;
}
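/* TSO sizing example for tcp_current_mss() (assuming IPv4, timestamps,
 * mss_now = 1448): the goal starts from 65535 - 20 - 0 - 32 = 65483 and
 * is rounded down to a multiple of the MSS,
 * 65483 - (65483 % 1448) = 65160, i.e. a 45-segment super-packet.
 */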
/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out = tp->packets_out;

        if (packets_out >= tp->snd_cwnd) {
                /* Network is fed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
        } else {
                /* Network starves. */
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;

                if (sysctl_tcp_slow_start_after_idle &&
                    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb,
                                      unsigned int mss_now, unsigned int cwnd)
{
        u32 window, cwnd_len;

        window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
        cwnd_len = mss_now * cwnd;
        return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
                                         struct sk_buff *skb)
{
        u32 in_flight, cwnd;

        /* Don't be strict about the congestion window for the final FIN. */
        if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
            tcp_skb_pcount(skb) == 1)
                return 1;

        in_flight = tcp_packets_in_flight(tp);
        cwnd = tp->snd_cwnd;
        if (in_flight < cwnd)
                return (cwnd - in_flight);

        return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
                             unsigned int mss_now)
{
        int tso_segs = tcp_skb_pcount(skb);

        if (!tso_segs ||
            (tso_segs > 1 &&
             tcp_skb_mss(skb) != mss_now)) {
                tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
        return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
        return after(tp->snd_sml, tp->snd_una) &&
                !after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if the packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
                                  const struct sk_buff *skb,
                                  unsigned mss_now, int nonagle)
{
        return (skb->len < mss_now &&
                ((nonagle & TCP_NAGLE_CORK) ||
                 (!nonagle &&
                  tp->packets_out &&
                  tcp_minshall_check(tp))));
}
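/* Example: an application dribbling 100 byte writes with Nagle enabled
 * (nonagle == 0) gets its second small segment held back by
 * tcp_nagle_check() for as long as a previous sub-MSS segment (tracked
 * via snd_sml by tcp_minshall_check()) is still unacknowledged; setting
 * TCP_NODELAY makes nonagle non-zero and lets it out immediately.
 */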
Miller static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1097c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1098c1b4a7e6SDavid S. Miller { 1099c1b4a7e6SDavid S. Miller /* Nagle rule does not apply to frames which sit in the middle of the 1100c1b4a7e6SDavid S. Miller * write_queue (they have no chance to get new data). 1101c1b4a7e6SDavid S. Miller * 1102c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 1103c1b4a7e6SDavid S. Miller * argument based upon the location of SKB in the send queue. 1104c1b4a7e6SDavid S. Miller */ 1105c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 1106c1b4a7e6SDavid S. Miller return 1; 1107c1b4a7e6SDavid S. Miller 1108d551e454SIlpo Järvinen /* Don't use the Nagle rule for urgent data (or for the final FIN). 1109d551e454SIlpo Järvinen * Nagle can be ignored during F-RTO too (see RFC4138). 1110d551e454SIlpo Järvinen */ 1111d551e454SIlpo Järvinen if (tp->urg_mode || (tp->frto_counter == 2) || 1112c1b4a7e6SDavid S. Miller (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) 1113c1b4a7e6SDavid S. Miller return 1; 1114c1b4a7e6SDavid S. Miller 1115c1b4a7e6SDavid S. Miller if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1116c1b4a7e6SDavid S. Miller return 1; 1117c1b4a7e6SDavid S. Miller 1118c1b4a7e6SDavid S. Miller return 0; 1119c1b4a7e6SDavid S. Miller } 1120c1b4a7e6SDavid S. Miller 1121c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 1122c1b4a7e6SDavid S. Miller static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss) 1123c1b4a7e6SDavid S. Miller { 1124c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1125c1b4a7e6SDavid S. Miller 1126c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 1127c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1128c1b4a7e6SDavid S. Miller 1129c1b4a7e6SDavid S. Miller return !after(end_seq, tp->snd_una + tp->snd_wnd); 1130c1b4a7e6SDavid S. Miller } 1131c1b4a7e6SDavid S. Miller 1132fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1133c1b4a7e6SDavid S. Miller * should be put on the wire right now. If so, it returns the number of 1134c1b4a7e6SDavid S. Miller * packets allowed by the congestion window. 1135c1b4a7e6SDavid S. Miller */ 1136c1b4a7e6SDavid S. Miller static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 1137c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1138c1b4a7e6SDavid S. Miller { 1139c1b4a7e6SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk); 1140c1b4a7e6SDavid S. Miller unsigned int cwnd_quota; 1141c1b4a7e6SDavid S. Miller 1142846998aeSDavid S. Miller tcp_init_tso_segs(sk, skb, cur_mss); 1143c1b4a7e6SDavid S. Miller 1144c1b4a7e6SDavid S. Miller if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1145c1b4a7e6SDavid S. Miller return 0; 1146c1b4a7e6SDavid S. Miller 1147c1b4a7e6SDavid S. Miller cwnd_quota = tcp_cwnd_test(tp, skb); 1148c1b4a7e6SDavid S. Miller if (cwnd_quota && 1149c1b4a7e6SDavid S. Miller !tcp_snd_wnd_test(tp, skb, cur_mss)) 1150c1b4a7e6SDavid S. Miller cwnd_quota = 0; 1151c1b4a7e6SDavid S. Miller 1152c1b4a7e6SDavid S. Miller return cwnd_quota; 1153c1b4a7e6SDavid S. Miller } 1154c1b4a7e6SDavid S. Miller 11559e412ba7SIlpo Järvinen int tcp_may_send_now(struct sock *sk) 1156c1b4a7e6SDavid S. Miller { 11579e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1158fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1159c1b4a7e6SDavid S.
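	/* Illustrative aside (not part of this file): tcp_snd_wnd_test()
	 * above, worked through on assumed numbers.  With snd_una = 1000,
	 * snd_wnd = 3000 and cur_mss = 1460, an skb covering [3500, 5500)
	 * is clamped to end at 3500 + 1460 = 4960; after(4960, 4000) is
	 * true, so not even one segment fits and the test fails.  An skb
	 * covering [3500, 4000) passes, since after(4000, 4000) is false.
	 * after() keeps the comparison correct across 32-bit sequence
	 * wraparound.
	 */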
Miller 1160c1b4a7e6SDavid S. Miller return (skb && 1161c1b4a7e6SDavid S. Miller tcp_snd_test(sk, skb, tcp_current_mss(sk, 1), 1162c1b4a7e6SDavid S. Miller (tcp_skb_is_last(sk, skb) ? 11634e67d876SIlpo Järvinen tp->nonagle : TCP_NAGLE_PUSH))); 1164c1b4a7e6SDavid S. Miller } 1165c1b4a7e6SDavid S. Miller 1166c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1167c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 1168c1b4a7e6SDavid S. Miller * tcp_fragment() except that it may make several kinds of assumptions 1169c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 1170c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 1171c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 1172c1b4a7e6SDavid S. Miller */ 1173846998aeSDavid S. Miller static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now) 1174c1b4a7e6SDavid S. Miller { 1175c1b4a7e6SDavid S. Miller struct sk_buff *buff; 1176c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 1177c1b4a7e6SDavid S. Miller u16 flags; 1178c1b4a7e6SDavid S. Miller 1179c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 1180c8ac3774SHerbert Xu if (skb->len != skb->data_len) 1181c8ac3774SHerbert Xu return tcp_fragment(sk, skb, len, mss_now); 1182c1b4a7e6SDavid S. Miller 1183c1b4a7e6SDavid S. Miller buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC); 1184c1b4a7e6SDavid S. Miller if (unlikely(buff == NULL)) 1185c1b4a7e6SDavid S. Miller return -ENOMEM; 1186c1b4a7e6SDavid S. Miller 1187b60b49eaSHerbert Xu sk_charge_skb(sk, buff); 1188b60b49eaSHerbert Xu buff->truesize += nlen; 1189c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 1190c1b4a7e6SDavid S. Miller 1191c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 1192c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1193c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1194c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1195c1b4a7e6SDavid S. Miller 1196c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 1197c1b4a7e6SDavid S. Miller flags = TCP_SKB_CB(skb)->flags; 1198c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 1199c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->flags = flags; 1200c1b4a7e6SDavid S. Miller 1201c1b4a7e6SDavid S. Miller /* This packet was never sent out yet, so no SACK bits. */ 1202c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->sacked = 0; 1203c1b4a7e6SDavid S. Miller 120484fa7933SPatrick McHardy buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1205c1b4a7e6SDavid S. Miller skb_split(skb, buff, len); 1206c1b4a7e6SDavid S. Miller 1207c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 1208846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1209846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, buff, mss_now); 1210c1b4a7e6SDavid S. Miller 1211c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 1212c1b4a7e6SDavid S. Miller skb_header_release(buff); 1213fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 1214c1b4a7e6SDavid S. Miller 1215c1b4a7e6SDavid S. Miller return 0; 1216c1b4a7e6SDavid S. Miller } 1217c1b4a7e6SDavid S. Miller 1218c1b4a7e6SDavid S. 
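/* Illustrative aside (not part of this file): the sequence bookkeeping
 * that tso_fragment() performs above, reduced to plain integers.  The
 * range [seq, end_seq) is split at byte offset len; the head keeps the
 * first len bytes and the tail inherits the old end, so the two halves
 * tile the original range exactly.  PSH/FIN move to the tail.
 */
#include <stdint.h>

struct seg { uint32_t seq, end_seq; };

static void model_split(struct seg *head, struct seg *tail, uint32_t len)
{
	tail->seq = head->seq + len;
	tail->end_seq = head->end_seq;
	head->end_seq = tail->seq;	/* head now ends where tail begins */
}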
Miller /* Try to defer sending, if possible, in order to minimize the amount 1219c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 1220c1b4a7e6SDavid S. Miller * 1221c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 1222c1b4a7e6SDavid S. Miller */ 12239e412ba7SIlpo Järvinen static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1224c1b4a7e6SDavid S. Miller { 12259e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 12266687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1227c1b4a7e6SDavid S. Miller u32 send_win, cong_win, limit, in_flight; 1228c1b4a7e6SDavid S. Miller 1229c1b4a7e6SDavid S. Miller if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1230ae8064acSJohn Heffner goto send_now; 1231c1b4a7e6SDavid S. Miller 12326687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 1233ae8064acSJohn Heffner goto send_now; 1234ae8064acSJohn Heffner 1235ae8064acSJohn Heffner /* Defer for less than two clock ticks. */ 1236ae8064acSJohn Heffner if (!tp->tso_deferred && ((jiffies<<1)>>1) - (tp->tso_deferred>>1) > 1) 1237ae8064acSJohn Heffner goto send_now; 1238908a75c1SDavid S. Miller 1239c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1240c1b4a7e6SDavid S. Miller 1241c1b4a7e6SDavid S. Miller BUG_ON(tcp_skb_pcount(skb) <= 1 || 1242c1b4a7e6SDavid S. Miller (tp->snd_cwnd <= in_flight)); 1243c1b4a7e6SDavid S. Miller 1244c1b4a7e6SDavid S. Miller send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq; 1245c1b4a7e6SDavid S. Miller 1246c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1247c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1248c1b4a7e6SDavid S. Miller 1249c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1250c1b4a7e6SDavid S. Miller 1251ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 1252ba244fe9SDavid S. Miller if (limit >= 65536) 1253ae8064acSJohn Heffner goto send_now; 1254ba244fe9SDavid S. Miller 1255c1b4a7e6SDavid S. Miller if (sysctl_tcp_tso_win_divisor) { 1256c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1257c1b4a7e6SDavid S. Miller 1258c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1259c1b4a7e6SDavid S. Miller * just use it. 1260c1b4a7e6SDavid S. Miller */ 1261c1b4a7e6SDavid S. Miller chunk /= sysctl_tcp_tso_win_divisor; 1262c1b4a7e6SDavid S. Miller if (limit >= chunk) 1263ae8064acSJohn Heffner goto send_now; 1264c1b4a7e6SDavid S. Miller } else { 1265c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1266c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1267c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 1268c1b4a7e6SDavid S. Miller * then send now. 1269c1b4a7e6SDavid S. Miller */ 1270c1b4a7e6SDavid S. Miller if (limit > tcp_max_burst(tp) * tp->mss_cache) 1271ae8064acSJohn Heffner goto send_now; 1272c1b4a7e6SDavid S. Miller } 1273c1b4a7e6SDavid S. Miller 1274c1b4a7e6SDavid S. Miller /* Ok, it looks like it is advisable to defer. */ 1275ae8064acSJohn Heffner tp->tso_deferred = 1 | (jiffies<<1); 1276ae8064acSJohn Heffner 1277c1b4a7e6SDavid S. Miller return 1; 1278ae8064acSJohn Heffner 1279ae8064acSJohn Heffner send_now: 1280ae8064acSJohn Heffner tp->tso_deferred = 0; 1281ae8064acSJohn Heffner return 0; 1282c1b4a7e6SDavid S. Miller } 1283c1b4a7e6SDavid S. 
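/* Illustrative aside (not part of this file): the decision core of
 * tcp_tso_should_defer() above as a pure function.  All arguments are
 * hypothetical stand-ins for the socket fields, and tcp_max_burst() is
 * modeled by max_burst.  Returns true when it pays to wait for more
 * data before building the TSO frame.
 */
#include <stdbool.h>
#include <stdint.h>

static bool model_tso_should_defer(uint32_t send_win, uint32_t cong_win,
				   uint32_t mss, uint32_t snd_wnd,
				   uint32_t snd_cwnd, uint32_t win_divisor,
				   uint32_t max_burst)
{
	uint32_t limit = send_win < cong_win ? send_win : cong_win;

	if (limit >= 65536)
		return false;		/* room for a full TSO frame: send */

	if (win_divisor) {
		uint32_t chunk = snd_wnd < snd_cwnd * mss ?
				 snd_wnd : snd_cwnd * mss;

		chunk /= win_divisor;	/* usable fraction of the window */
		if (limit >= chunk)
			return false;
	} else if (limit > max_burst * mss) {
		return false;		/* don't defer past one ACK's worth */
	}

	return true;			/* keep accumulating data */
}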
Miller 12845d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 12855d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available), 12865d424d5aSJohn Heffner * 1 if a probe was sent, 12875d424d5aSJohn Heffner * -1 otherwise */ 12885d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk) 12895d424d5aSJohn Heffner { 12905d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 12915d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 12925d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next; 12935d424d5aSJohn Heffner int len; 12945d424d5aSJohn Heffner int probe_size; 129591cc17c0SIlpo Järvinen int size_needed; 12965d424d5aSJohn Heffner unsigned int pif; 12975d424d5aSJohn Heffner int copy; 12985d424d5aSJohn Heffner int mss_now; 12995d424d5aSJohn Heffner 13005d424d5aSJohn Heffner /* Not currently probing/verifying, 13015d424d5aSJohn Heffner * not in recovery, 13025d424d5aSJohn Heffner * have enough cwnd, and 13035d424d5aSJohn Heffner * not SACKing (the variable headers throw things off) */ 13045d424d5aSJohn Heffner if (!icsk->icsk_mtup.enabled || 13055d424d5aSJohn Heffner icsk->icsk_mtup.probe_size || 13065d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 13075d424d5aSJohn Heffner tp->snd_cwnd < 11 || 13085d424d5aSJohn Heffner tp->rx_opt.eff_sacks) 13095d424d5aSJohn Heffner return -1; 13105d424d5aSJohn Heffner 13115d424d5aSJohn Heffner /* Very simple search strategy: just double the MSS. */ 13125d424d5aSJohn Heffner mss_now = tcp_current_mss(sk, 0); 13135d424d5aSJohn Heffner probe_size = 2*tp->mss_cache; 131491cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 13155d424d5aSJohn Heffner if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 13165d424d5aSJohn Heffner /* TODO: set timer for probe_converge_event */ 13175d424d5aSJohn Heffner return -1; 13185d424d5aSJohn Heffner } 13195d424d5aSJohn Heffner 13205d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */ 13217f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed) 13225d424d5aSJohn Heffner return -1; 13235d424d5aSJohn Heffner 132491cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed) 13255d424d5aSJohn Heffner return -1; 132691cc17c0SIlpo Järvinen if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd)) 13275d424d5aSJohn Heffner return 0; 13285d424d5aSJohn Heffner 13295d424d5aSJohn Heffner /* Do we need to wait to drain cwnd? */ 13305d424d5aSJohn Heffner pif = tcp_packets_in_flight(tp); 13315d424d5aSJohn Heffner if (pif + 2 > tp->snd_cwnd) { 13325d424d5aSJohn Heffner /* With no packets in flight, don't stall. */ 13335d424d5aSJohn Heffner if (pif == 0) 13345d424d5aSJohn Heffner return -1; 13355d424d5aSJohn Heffner else 13365d424d5aSJohn Heffner return 0; 13375d424d5aSJohn Heffner } 13385d424d5aSJohn Heffner 13395d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */ 13405d424d5aSJohn Heffner if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 13415d424d5aSJohn Heffner return -1; 13425d424d5aSJohn Heffner sk_charge_skb(sk, nskb); 13435d424d5aSJohn Heffner 1344fe067e8aSDavid S. Miller skb = tcp_send_head(sk); 1345fe067e8aSDavid S. 
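	/* Illustrative aside (not part of this file): the probe sizing
	 * above, worked through on assumed numbers.  With mss_cache =
	 * 1448 and reordering = 3, probe_size is 2 * 1448 = 2896 bytes
	 * and size_needed is 2896 + (3 + 1) * 1448 = 8688 bytes, which
	 * must be queued and inside the send window before we probe, so
	 * a lost probe is still recoverable by the fast retransmit
	 * machinery instead of stalling the connection.
	 */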
Miller tcp_insert_write_queue_before(nskb, skb, sk); 13465d424d5aSJohn Heffner 13475d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 13485d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 13495d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; 13505d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 13515d424d5aSJohn Heffner nskb->csum = 0; 135284fa7933SPatrick McHardy nskb->ip_summed = skb->ip_summed; 13535d424d5aSJohn Heffner 13545d424d5aSJohn Heffner len = 0; 13555d424d5aSJohn Heffner while (len < probe_size) { 1356fe067e8aSDavid S. Miller next = tcp_write_queue_next(sk, skb); 13575d424d5aSJohn Heffner 13585d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 13595d424d5aSJohn Heffner if (nskb->ip_summed) 13605d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 13615d424d5aSJohn Heffner else 13625d424d5aSJohn Heffner nskb->csum = skb_copy_and_csum_bits(skb, 0, 13635d424d5aSJohn Heffner skb_put(nskb, copy), copy, nskb->csum); 13645d424d5aSJohn Heffner 13655d424d5aSJohn Heffner if (skb->len <= copy) { 13665d424d5aSJohn Heffner /* We've eaten all the data from this skb. 13675d424d5aSJohn Heffner * Throw it away. */ 13685d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; 1369fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 13705d424d5aSJohn Heffner sk_stream_free_skb(sk, skb); 13715d424d5aSJohn Heffner } else { 13725d424d5aSJohn Heffner TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 13735d424d5aSJohn Heffner ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 13745d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 13755d424d5aSJohn Heffner skb_pull(skb, copy); 137684fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 13775d424d5aSJohn Heffner skb->csum = csum_partial(skb->data, skb->len, 0); 13785d424d5aSJohn Heffner } else { 13795d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 13805d424d5aSJohn Heffner tcp_set_skb_tso_segs(sk, skb, mss_now); 13815d424d5aSJohn Heffner } 13825d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 13835d424d5aSJohn Heffner } 13845d424d5aSJohn Heffner 13855d424d5aSJohn Heffner len += copy; 13865d424d5aSJohn Heffner skb = next; 13875d424d5aSJohn Heffner } 13885d424d5aSJohn Heffner tcp_init_tso_segs(sk, nskb, nskb->len); 13895d424d5aSJohn Heffner 13905d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 13915d424d5aSJohn Heffner * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 13925d424d5aSJohn Heffner TCP_SKB_CB(nskb)->when = tcp_time_stamp; 13935d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 13945d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 13955d424d5aSJohn Heffner * effectively two packets. */ 13965d424d5aSJohn Heffner tp->snd_cwnd--; 13979e412ba7SIlpo Järvinen update_send_head(sk, nskb); 13985d424d5aSJohn Heffner 13995d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 14000e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 14010e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 14025d424d5aSJohn Heffner 14035d424d5aSJohn Heffner return 1; 14045d424d5aSJohn Heffner } 14055d424d5aSJohn Heffner 14065d424d5aSJohn Heffner return -1; 14075d424d5aSJohn Heffner } 14085d424d5aSJohn Heffner 14095d424d5aSJohn Heffner 14101da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 14111da177e4SLinus Torvalds * send_head. 
This happens as incoming acks open up the remote 14121da177e4SLinus Torvalds * window for us. 14131da177e4SLinus Torvalds * 14141da177e4SLinus Torvalds * Returns 1, if no segments are in flight and we have queued segments, but 14151da177e4SLinus Torvalds * cannot send anything now because of SWS or another problem. 14161da177e4SLinus Torvalds */ 1417a2e2a59cSDavid S. Miller static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) 14181da177e4SLinus Torvalds { 14191da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 142092df7b51SDavid S. Miller struct sk_buff *skb; 1421c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 1422c1b4a7e6SDavid S. Miller int cwnd_quota; 14235d424d5aSJohn Heffner int result; 14241da177e4SLinus Torvalds 14251da177e4SLinus Torvalds /* If we are closed, the bytes will have to remain here. 14261da177e4SLinus Torvalds * In time closedown will finish, we empty the write queue and all 14271da177e4SLinus Torvalds * will be happy. 14281da177e4SLinus Torvalds */ 142992df7b51SDavid S. Miller if (unlikely(sk->sk_state == TCP_CLOSE)) 143092df7b51SDavid S. Miller return 0; 143192df7b51SDavid S. Miller 1432c1b4a7e6SDavid S. Miller sent_pkts = 0; 14335d424d5aSJohn Heffner 14345d424d5aSJohn Heffner /* Do MTU probing. */ 14355d424d5aSJohn Heffner if ((result = tcp_mtu_probe(sk)) == 0) { 14365d424d5aSJohn Heffner return 0; 14375d424d5aSJohn Heffner } else if (result > 0) { 14385d424d5aSJohn Heffner sent_pkts = 1; 14395d424d5aSJohn Heffner } 14405d424d5aSJohn Heffner 1441fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 1442c8ac3774SHerbert Xu unsigned int limit; 1443c8ac3774SHerbert Xu 1444b68e9f85SHerbert Xu tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1445c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1446c1b4a7e6SDavid S. Miller 1447b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 1448b68e9f85SHerbert Xu if (!cwnd_quota) 1449b68e9f85SHerbert Xu break; 1450b68e9f85SHerbert Xu 1451b68e9f85SHerbert Xu if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1452b68e9f85SHerbert Xu break; 1453b68e9f85SHerbert Xu 1454c1b4a7e6SDavid S. Miller if (tso_segs == 1) { 1455aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1456aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1457aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 1458aa93466bSDavid S. Miller break; 1459c1b4a7e6SDavid S. Miller } else { 14609e412ba7SIlpo Järvinen if (tcp_tso_should_defer(sk, skb)) 1461aa93466bSDavid S. Miller break; 1462c1b4a7e6SDavid S. Miller } 1463aa93466bSDavid S. Miller 1464c8ac3774SHerbert Xu limit = mss_now; 1465c1b4a7e6SDavid S. Miller if (tso_segs > 1) { 1466c8ac3774SHerbert Xu limit = tcp_window_allows(tp, skb, 1467c1b4a7e6SDavid S. Miller mss_now, cwnd_quota); 1468c1b4a7e6SDavid S. Miller 1469c1b4a7e6SDavid S. Miller if (skb->len < limit) { 1470c1b4a7e6SDavid S. Miller unsigned int trim = skb->len % mss_now; 1471c1b4a7e6SDavid S. Miller 1472c1b4a7e6SDavid S. Miller if (trim) 1473c1b4a7e6SDavid S. Miller limit = skb->len - trim; 1474c1b4a7e6SDavid S. Miller } 1475c1b4a7e6SDavid S. Miller } 1476c8ac3774SHerbert Xu 1477c8ac3774SHerbert Xu if (skb->len > limit && 1478c8ac3774SHerbert Xu unlikely(tso_fragment(sk, skb, limit, mss_now))) 14791da177e4SLinus Torvalds break; 14801da177e4SLinus Torvalds 14811da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 1482c1b4a7e6SDavid S. Miller 1483dfb4b9dcSDavid S. 
Miller if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC))) 14841da177e4SLinus Torvalds break; 14851da177e4SLinus Torvalds 14861da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 14871da177e4SLinus Torvalds * This call will increment packets_out. 14881da177e4SLinus Torvalds */ 14899e412ba7SIlpo Järvinen update_send_head(sk, skb); 14901da177e4SLinus Torvalds 14911da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 1492aa93466bSDavid S. Miller sent_pkts++; 14931da177e4SLinus Torvalds } 14941da177e4SLinus Torvalds 1495aa93466bSDavid S. Miller if (likely(sent_pkts)) { 14969e412ba7SIlpo Järvinen tcp_cwnd_validate(sk); 14971da177e4SLinus Torvalds return 0; 14981da177e4SLinus Torvalds } 1499fe067e8aSDavid S. Miller return !tp->packets_out && tcp_send_head(sk); 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds 1502a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 1503a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 1504a762a980SDavid S. Miller * The socket must be locked by the caller. 1505a762a980SDavid S. Miller */ 15069e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 15079e412ba7SIlpo Järvinen int nonagle) 1508a762a980SDavid S. Miller { 1509fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1510a762a980SDavid S. Miller 1511a762a980SDavid S. Miller if (skb) { 151255c97f3eSDavid S. Miller if (tcp_write_xmit(sk, cur_mss, nonagle)) 15139e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 1514a762a980SDavid S. Miller } 1515a762a980SDavid S. Miller } 1516a762a980SDavid S. Miller 1517c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 1518c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 1519c1b4a7e6SDavid S. Miller */ 1520c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 1521c1b4a7e6SDavid S. Miller { 1522c1b4a7e6SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk); 1523fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1524c1b4a7e6SDavid S. Miller unsigned int tso_segs, cwnd_quota; 1525c1b4a7e6SDavid S. Miller 1526c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 1527c1b4a7e6SDavid S. Miller 1528846998aeSDavid S. Miller tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1529c1b4a7e6SDavid S. Miller cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH); 1530c1b4a7e6SDavid S. Miller 1531c1b4a7e6SDavid S. Miller if (likely(cwnd_quota)) { 1532c8ac3774SHerbert Xu unsigned int limit; 1533c8ac3774SHerbert Xu 1534c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1535c1b4a7e6SDavid S. Miller 1536c8ac3774SHerbert Xu limit = mss_now; 1537c1b4a7e6SDavid S. Miller if (tso_segs > 1) { 1538c8ac3774SHerbert Xu limit = tcp_window_allows(tp, skb, 1539c1b4a7e6SDavid S. Miller mss_now, cwnd_quota); 1540c1b4a7e6SDavid S. Miller 1541c1b4a7e6SDavid S. Miller if (skb->len < limit) { 1542c1b4a7e6SDavid S. Miller unsigned int trim = skb->len % mss_now; 1543c1b4a7e6SDavid S. Miller 1544c1b4a7e6SDavid S. Miller if (trim) 1545c1b4a7e6SDavid S. Miller limit = skb->len - trim; 1546c1b4a7e6SDavid S. Miller } 1547c1b4a7e6SDavid S. Miller } 1548c8ac3774SHerbert Xu 1549c8ac3774SHerbert Xu if (skb->len > limit && 1550c8ac3774SHerbert Xu unlikely(tso_fragment(sk, skb, limit, mss_now))) 1551c1b4a7e6SDavid S. Miller return; 1552c1b4a7e6SDavid S. Miller 1553c1b4a7e6SDavid S. Miller /* Send it out now. */ 1554c1b4a7e6SDavid S. 
Miller TCP_SKB_CB(skb)->when = tcp_time_stamp; 1555c1b4a7e6SDavid S. Miller 1556dfb4b9dcSDavid S. Miller if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { 15579e412ba7SIlpo Järvinen update_send_head(sk, skb); 15589e412ba7SIlpo Järvinen tcp_cwnd_validate(sk); 1559c1b4a7e6SDavid S. Miller return; 1560c1b4a7e6SDavid S. Miller } 1561c1b4a7e6SDavid S. Miller } 1562c1b4a7e6SDavid S. Miller } 1563c1b4a7e6SDavid S. Miller 15641da177e4SLinus Torvalds /* This function returns the amount that we can raise the 15651da177e4SLinus Torvalds * usable window based on the following constraints 15661da177e4SLinus Torvalds * 15671da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 15681da177e4SLinus Torvalds * 2. We limit memory per socket 15691da177e4SLinus Torvalds * 15701da177e4SLinus Torvalds * RFC 1122: 15711da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 15721da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 15731da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 15741da177e4SLinus Torvalds * 15751da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 15761da177e4SLinus Torvalds * it at least MSS bytes. 15771da177e4SLinus Torvalds * 15781da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 15791da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 15801da177e4SLinus Torvalds * 15811da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 15821da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 15831da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 15841da177e4SLinus Torvalds * window to always advance by a single byte. 15851da177e4SLinus Torvalds * 15861da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 15871da177e4SLinus Torvalds * then this will not be a problem. 15881da177e4SLinus Torvalds * 15891da177e4SLinus Torvalds * BSD seems to make the following compromise: 15901da177e4SLinus Torvalds * 15911da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 15921da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 15931da177e4SLinus Torvalds * then set the window to 0. 15941da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 15951da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 15961da177e4SLinus Torvalds * and from being larger than the largest representable value. 15971da177e4SLinus Torvalds * 15981da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 15991da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 16001da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 16011da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 16021da177e4SLinus Torvalds * because the pipeline is full. 16031da177e4SLinus Torvalds * 16041da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 16051da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 16061da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 
16071da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 16081da177e4SLinus Torvalds * of having a fixed window size at almost all times. 16091da177e4SLinus Torvalds * 16101da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 16111da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 16121da177e4SLinus Torvalds * 16131da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 16141da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 16151da177e4SLinus Torvalds */ 16161da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 16171da177e4SLinus Torvalds { 1618463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 16191da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1620caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 16211da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 16221da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 16231da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 16241da177e4SLinus Torvalds * fluctuations. --SAW 1998/11/1 16251da177e4SLinus Torvalds */ 1626463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss; 16271da177e4SLinus Torvalds int free_space = tcp_space(sk); 16281da177e4SLinus Torvalds int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 16291da177e4SLinus Torvalds int window; 16301da177e4SLinus Torvalds 16311da177e4SLinus Torvalds if (mss > full_space) 16321da177e4SLinus Torvalds mss = full_space; 16331da177e4SLinus Torvalds 16341da177e4SLinus Torvalds if (free_space < full_space/2) { 1635463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0; 16361da177e4SLinus Torvalds 16371da177e4SLinus Torvalds if (tcp_memory_pressure) 16381da177e4SLinus Torvalds tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); 16391da177e4SLinus Torvalds 16401da177e4SLinus Torvalds if (free_space < mss) 16411da177e4SLinus Torvalds return 0; 16421da177e4SLinus Torvalds } 16431da177e4SLinus Torvalds 16441da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh) 16451da177e4SLinus Torvalds free_space = tp->rcv_ssthresh; 16461da177e4SLinus Torvalds 16471da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the 16481da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway. 16491da177e4SLinus Torvalds */ 16501da177e4SLinus Torvalds window = tp->rcv_wnd; 16511da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) { 16521da177e4SLinus Torvalds window = free_space; 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away. 16551da177e4SLinus Torvalds * Important case: prevent zero window announcement if 16561da177e4SLinus Torvalds * 1<<rcv_wscale > mss. 16571da177e4SLinus Torvalds */ 16581da177e4SLinus Torvalds if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 16591da177e4SLinus Torvalds window = (((window >> tp->rx_opt.rcv_wscale) + 1) 16601da177e4SLinus Torvalds << tp->rx_opt.rcv_wscale); 16611da177e4SLinus Torvalds } else { 16621da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss. 16631da177e4SLinus Torvalds * Window clamp already applied above.
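 *
 * (Illustrative aside, not part of this file, on assumed numbers: with
 * mss = 1460 and free_space = 10000, the branch below offers
 * (10000/1460)*1460 = 8760; in the scaled branch above, free_space = 100
 * with rcv_wscale = 7 would advertise 100 >> 7 = 0, so it is rounded up
 * to 1 << 7 = 128, keeping a positive free space from collapsing into a
 * zero-window advertisement.)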
16641da177e4SLinus Torvalds * If our current window offering is within 1 mss of the 16651da177e4SLinus Torvalds * free space we just keep it. This prevents the divide 16661da177e4SLinus Torvalds * and multiply from happening most of the time. 16671da177e4SLinus Torvalds * We also don't do any window rounding when the free space 16681da177e4SLinus Torvalds * is too small. 16691da177e4SLinus Torvalds */ 16701da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 16711da177e4SLinus Torvalds window = (free_space/mss)*mss; 167284565070SJohn Heffner else if (mss == full_space && 167384565070SJohn Heffner free_space > window + full_space/2) 167484565070SJohn Heffner window = free_space; 16751da177e4SLinus Torvalds } 16761da177e4SLinus Torvalds 16771da177e4SLinus Torvalds return window; 16781da177e4SLinus Torvalds } 16791da177e4SLinus Torvalds 16801da177e4SLinus Torvalds /* Attempt to collapse two adjacent SKB's during retransmission. */ 16811da177e4SLinus Torvalds static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) 16821da177e4SLinus Torvalds { 16831da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1684fe067e8aSDavid S. Miller struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 16851da177e4SLinus Torvalds 16861da177e4SLinus Torvalds /* The first test we must make is that neither of these two 16871da177e4SLinus Torvalds * SKB's are still referenced by someone else. 16881da177e4SLinus Torvalds */ 16891da177e4SLinus Torvalds if (!skb_cloned(skb) && !skb_cloned(next_skb)) { 16901da177e4SLinus Torvalds int skb_size = skb->len, next_skb_size = next_skb->len; 16911da177e4SLinus Torvalds u16 flags = TCP_SKB_CB(skb)->flags; 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds /* Also punt if next skb has been SACK'd. */ 16941da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) 16951da177e4SLinus Torvalds return; 16961da177e4SLinus Torvalds 16971da177e4SLinus Torvalds /* Next skb is out of window. */ 16981da177e4SLinus Torvalds if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd)) 16991da177e4SLinus Torvalds return; 17001da177e4SLinus Torvalds 17011da177e4SLinus Torvalds /* Punt if not enough space exists in the first SKB for 17021da177e4SLinus Torvalds * the data in the second, or the total combined payload 17031da177e4SLinus Torvalds * would exceed the MSS. 17041da177e4SLinus Torvalds */ 17051da177e4SLinus Torvalds if ((next_skb_size > skb_tailroom(skb)) || 17061da177e4SLinus Torvalds ((skb_size + next_skb_size) > mss_now)) 17071da177e4SLinus Torvalds return; 17081da177e4SLinus Torvalds 17091da177e4SLinus Torvalds BUG_ON(tcp_skb_pcount(skb) != 1 || 17101da177e4SLinus Torvalds tcp_skb_pcount(next_skb) != 1); 17111da177e4SLinus Torvalds 1712dc86967bSIlpo Järvinen if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out && 1713a47e5a98SIlpo Järvinen (next_skb == tp->highest_sack))) 1714a6963a6bSIlpo Järvinen return; 1715a6963a6bSIlpo Järvinen 17161da177e4SLinus Torvalds /* Ok. We will be able to collapse the packet. */ 1717fe067e8aSDavid S. 
Miller tcp_unlink_write_queue(next_skb, sk); 17181da177e4SLinus Torvalds 17191a4e2d09SArnaldo Carvalho de Melo skb_copy_from_linear_data(next_skb, 17201a4e2d09SArnaldo Carvalho de Melo skb_put(skb, next_skb_size), 17211a4e2d09SArnaldo Carvalho de Melo next_skb_size); 17221da177e4SLinus Torvalds 172352d570aaSJarek Poplawski if (next_skb->ip_summed == CHECKSUM_PARTIAL) 172452d570aaSJarek Poplawski skb->ip_summed = CHECKSUM_PARTIAL; 17251da177e4SLinus Torvalds 172684fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 17271da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 17281da177e4SLinus Torvalds 17291da177e4SLinus Torvalds /* Update sequence range on original skb. */ 17301da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 17311da177e4SLinus Torvalds 17321da177e4SLinus Torvalds /* Merge over control information. */ 17331da177e4SLinus Torvalds flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */ 17341da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = flags; 17351da177e4SLinus Torvalds 17361da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 17371da177e4SLinus Torvalds * packet counting does not break. 17381da177e4SLinus Torvalds */ 17391da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL); 17401da177e4SLinus Torvalds if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS) 17411da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(next_skb); 1742b5860bbaSIlpo Järvinen if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) 17431da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(next_skb); 17441da177e4SLinus Torvalds /* Reno case is special. Sigh... */ 1745e60402d0SIlpo Järvinen if (tcp_is_reno(tp) && tp->sacked_out) 17461da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->sacked_out, next_skb); 17471da177e4SLinus Torvalds 1748a47e5a98SIlpo Järvinen tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb)); 1749e9144bd8SIlpo Järvinen tp->packets_out -= tcp_skb_pcount(next_skb); 1750b7689205SIlpo Järvinen 1751b7689205SIlpo Järvinen /* changed transmit queue under us so clear hints */ 1752b7689205SIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 1753b7689205SIlpo Järvinen 17541da177e4SLinus Torvalds sk_stream_free_skb(sk, next_skb); 17551da177e4SLinus Torvalds } 17561da177e4SLinus Torvalds } 17571da177e4SLinus Torvalds 17581da177e4SLinus Torvalds /* Do a simple retransmit without using the backoff mechanisms in 17591da177e4SLinus Torvalds * tcp_timer. This is used for path mtu discovery. 17601da177e4SLinus Torvalds * The socket is already locked here. 17611da177e4SLinus Torvalds */ 17621da177e4SLinus Torvalds void tcp_simple_retransmit(struct sock *sk) 17631da177e4SLinus Torvalds { 17646687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 17651da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17661da177e4SLinus Torvalds struct sk_buff *skb; 17671da177e4SLinus Torvalds unsigned int mss = tcp_current_mss(sk, 0); 17681da177e4SLinus Torvalds int lost = 0; 17691da177e4SLinus Torvalds 1770fe067e8aSDavid S. Miller tcp_for_write_queue(skb, sk) { 1771fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 1772fe067e8aSDavid S. 
Miller break; 17731da177e4SLinus Torvalds if (skb->len > mss && 17741da177e4SLinus Torvalds !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { 17751da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 17761da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 17771da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 17781da177e4SLinus Torvalds } 17791da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) { 17801da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 17811da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 17821da177e4SLinus Torvalds lost = 1; 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds } 17851da177e4SLinus Torvalds } 17861da177e4SLinus Torvalds 17875af4ec23SIlpo Järvinen tcp_clear_all_retrans_hints(tp); 17886a438bbeSStephen Hemminger 17891da177e4SLinus Torvalds if (!lost) 17901da177e4SLinus Torvalds return; 17911da177e4SLinus Torvalds 1792005903bcSIlpo Järvinen tcp_verify_left_out(tp); 17931da177e4SLinus Torvalds 17941da177e4SLinus Torvalds /* Don't muck with the congestion window here. 17951da177e4SLinus Torvalds * Reason is that we do not increase amount of _data_ 17961da177e4SLinus Torvalds * in network, but units changed and effective 17971da177e4SLinus Torvalds * cwnd/ssthresh really reduced now. 17981da177e4SLinus Torvalds */ 17996687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss) { 18001da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 18016687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = tcp_current_ssthresh(sk); 18021da177e4SLinus Torvalds tp->prior_ssthresh = 0; 18031da177e4SLinus Torvalds tp->undo_marker = 0; 18046687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 18051da177e4SLinus Torvalds } 18061da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 18071da177e4SLinus Torvalds } 18081da177e4SLinus Torvalds 18091da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue 18101da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an 18111da177e4SLinus Torvalds * error occurred which prevented the send. 18121da177e4SLinus Torvalds */ 18131da177e4SLinus Torvalds int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 18141da177e4SLinus Torvalds { 18151da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 18165d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 18171da177e4SLinus Torvalds unsigned int cur_mss = tcp_current_mss(sk, 0); 18181da177e4SLinus Torvalds int err; 18191da177e4SLinus Torvalds 18205d424d5aSJohn Heffner /* Inconclusive MTU probe */ 18215d424d5aSJohn Heffner if (icsk->icsk_mtup.probe_size) { 18225d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 18235d424d5aSJohn Heffner } 18245d424d5aSJohn Heffner 18251da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible 1826caa20d9aSStephen Hemminger * copying overhead: fragmentation, tunneling, mangling etc.
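 *
 * (Illustrative aside, not part of this file, on assumed numbers: with
 * sk_wmem_queued = 64 KB and sk_sndbuf = 128 KB, the check below permits
 * the retransmit only while sk_wmem_alloc stays under
 * min(64K + 16K, 128K) = 80 KB.)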
18271da177e4SLinus Torvalds */ 18281da177e4SLinus Torvalds if (atomic_read(&sk->sk_wmem_alloc) > 18291da177e4SLinus Torvalds min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 18301da177e4SLinus Torvalds return -EAGAIN; 18311da177e4SLinus Torvalds 18321da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 18331da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 18341da177e4SLinus Torvalds BUG(); 18351da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 18361da177e4SLinus Torvalds return -ENOMEM; 18371da177e4SLinus Torvalds } 18381da177e4SLinus Torvalds 18391da177e4SLinus Torvalds /* If receiver has shrunk his window, and skb is out of 18401da177e4SLinus Torvalds * new window, do not retransmit it. The exception is the 18411da177e4SLinus Torvalds * case, when window is shrunk to zero. In this case 18421da177e4SLinus Torvalds * our retransmit serves as a zero window probe. 18431da177e4SLinus Torvalds */ 18441da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd) 18451da177e4SLinus Torvalds && TCP_SKB_CB(skb)->seq != tp->snd_una) 18461da177e4SLinus Torvalds return -EAGAIN; 18471da177e4SLinus Torvalds 18481da177e4SLinus Torvalds if (skb->len > cur_mss) { 1849846998aeSDavid S. Miller if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 18501da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */ 18511da177e4SLinus Torvalds } 18521da177e4SLinus Torvalds 18531da177e4SLinus Torvalds /* Collapse two adjacent packets if worthwhile and we can. */ 18541da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 18551da177e4SLinus Torvalds (skb->len < (cur_mss >> 1)) && 1856fe067e8aSDavid S. Miller (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) && 1857fe067e8aSDavid S. Miller (!tcp_skb_is_last(sk, skb)) && 1858fe067e8aSDavid S. Miller (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && 1859fe067e8aSDavid S. Miller (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) && 18601da177e4SLinus Torvalds (sysctl_tcp_retrans_collapse != 0)) 18611da177e4SLinus Torvalds tcp_retrans_try_collapse(sk, skb, cur_mss); 18621da177e4SLinus Torvalds 18638292a17aSArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 18641da177e4SLinus Torvalds return -EHOSTUNREACH; /* Routing failure or similar. */ 18651da177e4SLinus Torvalds 18661da177e4SLinus Torvalds /* Some Solaris stacks overoptimize and ignore the FIN on a 18671da177e4SLinus Torvalds * retransmit when old data is attached. So strip it off 18681da177e4SLinus Torvalds * since it is cheap to do so and saves bytes on the network. 
18691da177e4SLinus Torvalds */ 18701da177e4SLinus Torvalds if (skb->len > 0 && 18711da177e4SLinus Torvalds (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 18721da177e4SLinus Torvalds tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 18731da177e4SLinus Torvalds if (!pskb_trim(skb, 0)) { 18741da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; 18757967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 18767967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 18777967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 18781da177e4SLinus Torvalds skb->ip_summed = CHECKSUM_NONE; 18791da177e4SLinus Torvalds skb->csum = 0; 18801da177e4SLinus Torvalds } 18811da177e4SLinus Torvalds } 18821da177e4SLinus Torvalds 18831da177e4SLinus Torvalds /* Make a copy, if the first transmission SKB clone we made 18841da177e4SLinus Torvalds * is still in somebody's hands, else make a clone. 18851da177e4SLinus Torvalds */ 18861da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 18871da177e4SLinus Torvalds 1888dfb4b9dcSDavid S. Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 18891da177e4SLinus Torvalds 18901da177e4SLinus Torvalds if (err == 0) { 18911da177e4SLinus Torvalds /* Update global TCP statistics. */ 18921da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_RETRANSSEGS); 18931da177e4SLinus Torvalds 18941da177e4SLinus Torvalds tp->total_retrans++; 18951da177e4SLinus Torvalds 18961da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 18971da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { 18981da177e4SLinus Torvalds if (net_ratelimit()) 18991da177e4SLinus Torvalds printk(KERN_DEBUG "retrans_out leaked.\n"); 19001da177e4SLinus Torvalds } 19011da177e4SLinus Torvalds #endif 1902b08d6cb2SIlpo Järvinen if (!tp->retrans_out) 1903b08d6cb2SIlpo Järvinen tp->lost_retrans_low = tp->snd_nxt; 19041da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 19051da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 19061da177e4SLinus Torvalds 19071da177e4SLinus Torvalds /* Save stamp of the first retransmit. */ 19081da177e4SLinus Torvalds if (!tp->retrans_stamp) 19091da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(skb)->when; 19101da177e4SLinus Torvalds 19111da177e4SLinus Torvalds tp->undo_retrans++; 19121da177e4SLinus Torvalds 19131da177e4SLinus Torvalds /* snd_nxt is stored to detect loss of retransmitted segment, 19141da177e4SLinus Torvalds * see tcp_input.c tcp_sacktag_write_queue(). 19151da177e4SLinus Torvalds */ 19161da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 19171da177e4SLinus Torvalds } 19181da177e4SLinus Torvalds return err; 19191da177e4SLinus Torvalds } 19201da177e4SLinus Torvalds 19211da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially 19221da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue 19231da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either 19241da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached. 19251da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout 19261da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again. 19271da177e4SLinus Torvalds * If so, we use it to avoid unnecessary retransmissions.
19281da177e4SLinus Torvalds */ 19291da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk) 19301da177e4SLinus Torvalds { 19316687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 19321da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19331da177e4SLinus Torvalds struct sk_buff *skb; 19346a438bbeSStephen Hemminger int packet_cnt; 19356a438bbeSStephen Hemminger 19366a438bbeSStephen Hemminger if (tp->retransmit_skb_hint) { 19376a438bbeSStephen Hemminger skb = tp->retransmit_skb_hint; 19386a438bbeSStephen Hemminger packet_cnt = tp->retransmit_cnt_hint; 19396a438bbeSStephen Hemminger } else { 1940fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 19416a438bbeSStephen Hemminger packet_cnt = 0; 19426a438bbeSStephen Hemminger } 19431da177e4SLinus Torvalds 19441da177e4SLinus Torvalds /* First pass: retransmit lost packets. */ 19456a438bbeSStephen Hemminger if (tp->lost_out) { 1946fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 19471da177e4SLinus Torvalds __u8 sacked = TCP_SKB_CB(skb)->sacked; 19481da177e4SLinus Torvalds 1949fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 1950fe067e8aSDavid S. Miller break; 19516a438bbeSStephen Hemminger /* we could do better than to assign each time */ 19526a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb; 19536a438bbeSStephen Hemminger tp->retransmit_cnt_hint = packet_cnt; 19546a438bbeSStephen Hemminger 19551da177e4SLinus Torvalds /* Assume this retransmit will generate 19561da177e4SLinus Torvalds * only one packet for congestion window 19571da177e4SLinus Torvalds * calculation purposes. This works because 19581da177e4SLinus Torvalds * tcp_retransmit_skb() will chop up the 19591da177e4SLinus Torvalds * packet to be MSS sized and all the 19601da177e4SLinus Torvalds * packet counting works out. 19611da177e4SLinus Torvalds */ 19621da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 19631da177e4SLinus Torvalds return; 19641da177e4SLinus Torvalds 19651da177e4SLinus Torvalds if (sacked & TCPCB_LOST) { 19661da177e4SLinus Torvalds if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 19676a438bbeSStephen Hemminger if (tcp_retransmit_skb(sk, skb)) { 19686a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 19691da177e4SLinus Torvalds return; 19706a438bbeSStephen Hemminger } 19716687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Loss) 19721da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); 19731da177e4SLinus Torvalds else 19741da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); 19751da177e4SLinus Torvalds 1976fe067e8aSDavid S. Miller if (skb == tcp_write_queue_head(sk)) 1977463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 19783f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 19793f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 19801da177e4SLinus Torvalds } 19811da177e4SLinus Torvalds 19826a438bbeSStephen Hemminger packet_cnt += tcp_skb_pcount(skb); 19836a438bbeSStephen Hemminger if (packet_cnt >= tp->lost_out) 19841da177e4SLinus Torvalds break; 19851da177e4SLinus Torvalds } 19861da177e4SLinus Torvalds } 19871da177e4SLinus Torvalds } 19881da177e4SLinus Torvalds 19891da177e4SLinus Torvalds /* OK, demanded retransmission is finished. */ 19901da177e4SLinus Torvalds 19911da177e4SLinus Torvalds /* Forward retransmissions are possible only during Recovery.
*/ 19926687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Recovery) 19931da177e4SLinus Torvalds return; 19941da177e4SLinus Torvalds 19951da177e4SLinus Torvalds /* No forward retransmissions in Reno are possible. */ 1996e60402d0SIlpo Järvinen if (tcp_is_reno(tp)) 19971da177e4SLinus Torvalds return; 19981da177e4SLinus Torvalds 19991da177e4SLinus Torvalds /* Yeah, we have to make a difficult choice between forward transmission 20001da177e4SLinus Torvalds * and retransmission... Both ways have their merits... 20011da177e4SLinus Torvalds * 20021da177e4SLinus Torvalds * For now we do not retransmit anything, while we have some new 2003539d243fSIlpo Järvinen * segments to send. In the other cases, follow rule 3 for 2004539d243fSIlpo Järvinen * NextSeg() specified in RFC3517. 20051da177e4SLinus Torvalds */ 20061da177e4SLinus Torvalds 20079e412ba7SIlpo Järvinen if (tcp_may_send_now(sk)) 20081da177e4SLinus Torvalds return; 20091da177e4SLinus Torvalds 2010539d243fSIlpo Järvinen /* If nothing is SACKed, highest_sack in the loop won't be valid */ 2011539d243fSIlpo Järvinen if (!tp->sacked_out) 2012539d243fSIlpo Järvinen return; 2013539d243fSIlpo Järvinen 2014539d243fSIlpo Järvinen if (tp->forward_skb_hint) 20156a438bbeSStephen Hemminger skb = tp->forward_skb_hint; 2016539d243fSIlpo Järvinen else 2017fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 20181da177e4SLinus Torvalds 2019fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 2020fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 2021fe067e8aSDavid S. Miller break; 20226a438bbeSStephen Hemminger tp->forward_skb_hint = skb; 20236a438bbeSStephen Hemminger 2024a47e5a98SIlpo Järvinen if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 20251da177e4SLinus Torvalds break; 20261da177e4SLinus Torvalds 20271da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 20281da177e4SLinus Torvalds break; 20291da177e4SLinus Torvalds 20301da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 20311da177e4SLinus Torvalds continue; 20321da177e4SLinus Torvalds 20331da177e4SLinus Torvalds /* Ok, retransmit it. */ 20346a438bbeSStephen Hemminger if (tcp_retransmit_skb(sk, skb)) { 20356a438bbeSStephen Hemminger tp->forward_skb_hint = NULL; 20361da177e4SLinus Torvalds break; 20376a438bbeSStephen Hemminger } 20381da177e4SLinus Torvalds 2039fe067e8aSDavid S. Miller if (skb == tcp_write_queue_head(sk)) 20403f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 20413f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 20423f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 20431da177e4SLinus Torvalds 20441da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 20451da177e4SLinus Torvalds } 20461da177e4SLinus Torvalds } 20471da177e4SLinus Torvalds 20481da177e4SLinus Torvalds 20491da177e4SLinus Torvalds /* Send a FIN. The caller locks the socket for us. This cannot be 20501da177e4SLinus Torvalds * allowed to fail queueing a FIN frame under any circumstances. 20511da177e4SLinus Torvalds */ 20521da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 20531da177e4SLinus Torvalds { 20541da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2055fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_write_queue_tail(sk); 20561da177e4SLinus Torvalds int mss_now; 20571da177e4SLinus Torvalds 20581da177e4SLinus Torvalds /* Optimization, tack on the FIN if we have a queue of 20591da177e4SLinus Torvalds * unsent frames.
But be careful about outgoing SACKS 20601da177e4SLinus Torvalds * and IP options. 20611da177e4SLinus Torvalds */ 20621da177e4SLinus Torvalds mss_now = tcp_current_mss(sk, 1); 20631da177e4SLinus Torvalds 2064fe067e8aSDavid S. Miller if (tcp_send_head(sk) != NULL) { 20651da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 20661da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq++; 20671da177e4SLinus Torvalds tp->write_seq++; 20681da177e4SLinus Torvalds } else { 20691da177e4SLinus Torvalds /* Socket is locked, keep trying until memory is available. */ 20701da177e4SLinus Torvalds for (;;) { 2071d179cd12SDavid S. Miller skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); 20721da177e4SLinus Torvalds if (skb) 20731da177e4SLinus Torvalds break; 20741da177e4SLinus Torvalds yield(); 20751da177e4SLinus Torvalds } 20761da177e4SLinus Torvalds 20771da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 20781da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 20791da177e4SLinus Torvalds skb->csum = 0; 20801da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 20811da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 20827967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 20837967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 20847967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 20851da177e4SLinus Torvalds 20861da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 20871da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = tp->write_seq; 20881da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 20891da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 20901da177e4SLinus Torvalds } 20919e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 20921da177e4SLinus Torvalds } 20931da177e4SLinus Torvalds 20941da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 20951da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 20961da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 209765bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 20981da177e4SLinus Torvalds */ 2099dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 21001da177e4SLinus Torvalds { 21011da177e4SLinus Torvalds struct sk_buff *skb; 21021da177e4SLinus Torvalds 21031da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 21041da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 21051da177e4SLinus Torvalds if (!skb) { 21061da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 21071da177e4SLinus Torvalds return; 21081da177e4SLinus Torvalds } 21091da177e4SLinus Torvalds 21101da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 21111da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 21121da177e4SLinus Torvalds skb->csum = 0; 21131da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 21141da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 21157967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 21167967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 21177967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 21181da177e4SLinus Torvalds 21191da177e4SLinus Torvalds /* Send it off. 
*/ 21209e412ba7SIlpo Järvinen TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk); 21211da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 21221da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2123dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 21241da177e4SLinus Torvalds NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds 21271da177e4SLinus Torvalds /* WARNING: This routine must only be called when we have already sent 21281da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 21291da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 21301da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 21311da177e4SLinus Torvalds */ 21321da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 21331da177e4SLinus Torvalds { 21341da177e4SLinus Torvalds struct sk_buff* skb; 21351da177e4SLinus Torvalds 2136fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 21371da177e4SLinus Torvalds if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { 21381da177e4SLinus Torvalds printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 21391da177e4SLinus Torvalds return -EFAULT; 21401da177e4SLinus Torvalds } 21411da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { 21421da177e4SLinus Torvalds if (skb_cloned(skb)) { 21431da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 21441da177e4SLinus Torvalds if (nskb == NULL) 21451da177e4SLinus Torvalds return -ENOMEM; 2146fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 21471da177e4SLinus Torvalds skb_header_release(nskb); 2148fe067e8aSDavid S. Miller __tcp_add_write_queue_head(sk, nskb); 21491da177e4SLinus Torvalds sk_stream_free_skb(sk, skb); 21501da177e4SLinus Torvalds sk_charge_skb(sk, nskb); 21511da177e4SLinus Torvalds skb = nskb; 21521da177e4SLinus Torvalds } 21531da177e4SLinus Torvalds 21541da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 21551da177e4SLinus Torvalds TCP_ECN_send_synack(tcp_sk(sk), skb); 21561da177e4SLinus Torvalds } 21571da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2158dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 21591da177e4SLinus Torvalds } 21601da177e4SLinus Torvalds 21611da177e4SLinus Torvalds /* 21621da177e4SLinus Torvalds * Prepare a SYN-ACK. 21631da177e4SLinus Torvalds */ 21641da177e4SLinus Torvalds struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 216560236fddSArnaldo Carvalho de Melo struct request_sock *req) 21661da177e4SLinus Torvalds { 21672e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 21681da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 21691da177e4SLinus Torvalds struct tcphdr *th; 21701da177e4SLinus Torvalds int tcp_header_size; 21711da177e4SLinus Torvalds struct sk_buff *skb; 2172cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2173cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key *md5; 2174cfb6eeb4SYOSHIFUJI Hideaki __u8 *md5_hash_location; 2175cfb6eeb4SYOSHIFUJI Hideaki #endif 21761da177e4SLinus Torvalds 21771da177e4SLinus Torvalds skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 21781da177e4SLinus Torvalds if (skb == NULL) 21791da177e4SLinus Torvalds return NULL; 21801da177e4SLinus Torvalds 21811da177e4SLinus Torvalds /* Reserve space for headers. 
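 *
 * (Illustrative aside, not part of this file: with MSS, timestamps and
 * window scaling all negotiated, the tcp_header_size computed below
 * works out to 20 bytes of base tcphdr + 4 for MSS + 12 for the aligned
 * timestamp option (SACK-permitted rides in its two NOPs) + 4 for
 * aligned window scaling = 40 bytes, i.e. th->doff = 40 >> 2 = 10.)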
*/ 21821da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 21831da177e4SLinus Torvalds 21841da177e4SLinus Torvalds skb->dst = dst_clone(dst); 21851da177e4SLinus Torvalds 21861da177e4SLinus Torvalds tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + 21872e6599cbSArnaldo Carvalho de Melo (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + 21882e6599cbSArnaldo Carvalho de Melo (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + 21891da177e4SLinus Torvalds /* SACK_PERM is in the place of NOP NOP of TS */ 21902e6599cbSArnaldo Carvalho de Melo ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); 2191cfb6eeb4SYOSHIFUJI Hideaki 2192cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2193cfb6eeb4SYOSHIFUJI Hideaki /* Are we doing MD5 on this segment? If so - make room for it */ 2194cfb6eeb4SYOSHIFUJI Hideaki md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 2195cfb6eeb4SYOSHIFUJI Hideaki if (md5) 2196cfb6eeb4SYOSHIFUJI Hideaki tcp_header_size += TCPOLEN_MD5SIG_ALIGNED; 2197cfb6eeb4SYOSHIFUJI Hideaki #endif 2198aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size); 2199aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb); 22001da177e4SLinus Torvalds 2201aa8223c7SArnaldo Carvalho de Melo th = tcp_hdr(skb); 22021da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 22031da177e4SLinus Torvalds th->syn = 1; 22041da177e4SLinus Torvalds th->ack = 1; 22051da177e4SLinus Torvalds TCP_ECN_make_synack(req, th); 22061da177e4SLinus Torvalds th->source = inet_sk(sk)->sport; 22072e6599cbSArnaldo Carvalho de Melo th->dest = ireq->rmt_port; 22082e6599cbSArnaldo Carvalho de Melo TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; 22091da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; 22101da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = 0; 22117967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 22127967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 22137967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 22141da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq); 22152e6599cbSArnaldo Carvalho de Melo th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 22161da177e4SLinus Torvalds if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 22171da177e4SLinus Torvalds __u8 rcv_wscale; 22181da177e4SLinus Torvalds /* Set this up on the first call only */ 22191da177e4SLinus Torvalds req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 22201da177e4SLinus Torvalds /* tcp_full_space because it is guaranteed to be the first packet */ 22211da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 22222e6599cbSArnaldo Carvalho de Melo dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 22231da177e4SLinus Torvalds &req->rcv_wnd, 22241da177e4SLinus Torvalds &req->window_clamp, 22252e6599cbSArnaldo Carvalho de Melo ireq->wscale_ok, 22261da177e4SLinus Torvalds &rcv_wscale); 22272e6599cbSArnaldo Carvalho de Melo ireq->rcv_wscale = rcv_wscale; 22281da177e4SLinus Torvalds } 22291da177e4SLinus Torvalds 22301da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
*/ 2231600ff0c2SIlpo Järvinen th->window = htons(min(req->rcv_wnd, 65535U)); 22321da177e4SLinus Torvalds 22331da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2234df7a3b07SAl Viro tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, 22352e6599cbSArnaldo Carvalho de Melo ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, 22361da177e4SLinus Torvalds TCP_SKB_CB(skb)->when, 2237cfb6eeb4SYOSHIFUJI Hideaki req->ts_recent, 2238cfb6eeb4SYOSHIFUJI Hideaki ( 2239cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2240cfb6eeb4SYOSHIFUJI Hideaki md5 ? &md5_hash_location : 2241cfb6eeb4SYOSHIFUJI Hideaki #endif 2242cfb6eeb4SYOSHIFUJI Hideaki NULL) 2243cfb6eeb4SYOSHIFUJI Hideaki ); 22441da177e4SLinus Torvalds 22451da177e4SLinus Torvalds skb->csum = 0; 22461da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 22471da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_OUTSEGS); 2248cfb6eeb4SYOSHIFUJI Hideaki 2249cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2250cfb6eeb4SYOSHIFUJI Hideaki /* Okay, we have all we need - do the md5 hash if needed */ 2251cfb6eeb4SYOSHIFUJI Hideaki if (md5) { 2252cfb6eeb4SYOSHIFUJI Hideaki tp->af_specific->calc_md5_hash(md5_hash_location, 2253cfb6eeb4SYOSHIFUJI Hideaki md5, 2254cfb6eeb4SYOSHIFUJI Hideaki NULL, dst, req, 2255aa8223c7SArnaldo Carvalho de Melo tcp_hdr(skb), sk->sk_protocol, 2256cfb6eeb4SYOSHIFUJI Hideaki skb->len); 2257cfb6eeb4SYOSHIFUJI Hideaki } 2258cfb6eeb4SYOSHIFUJI Hideaki #endif 2259cfb6eeb4SYOSHIFUJI Hideaki 22601da177e4SLinus Torvalds return skb; 22611da177e4SLinus Torvalds } 22621da177e4SLinus Torvalds 22631da177e4SLinus Torvalds /* 22641da177e4SLinus Torvalds * Do all connect socket setups that can be done AF independent. 22651da177e4SLinus Torvalds */ 226640efc6faSStephen Hemminger static void tcp_connect_init(struct sock *sk) 22671da177e4SLinus Torvalds { 22681da177e4SLinus Torvalds struct dst_entry *dst = __sk_dst_get(sk); 22691da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 22701da177e4SLinus Torvalds __u8 rcv_wscale; 22711da177e4SLinus Torvalds 22721da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end. 22731da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 22741da177e4SLinus Torvalds */ 22751da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr) + 22761da177e4SLinus Torvalds (sysctl_tcp_timestamps ? 
TCPOLEN_TSTAMP_ALIGNED : 0); 22771da177e4SLinus Torvalds 2278cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2279cfb6eeb4SYOSHIFUJI Hideaki if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2280cfb6eeb4SYOSHIFUJI Hideaki tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2281cfb6eeb4SYOSHIFUJI Hideaki #endif 2282cfb6eeb4SYOSHIFUJI Hideaki 22831da177e4SLinus Torvalds /* If the user gave us a TCP_MAXSEG, record it as the clamp */ 22841da177e4SLinus Torvalds if (tp->rx_opt.user_mss) 22851da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 22861da177e4SLinus Torvalds tp->max_window = 0; 22875d424d5aSJohn Heffner tcp_mtup_init(sk); 22881da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst)); 22891da177e4SLinus Torvalds 22901da177e4SLinus Torvalds if (!tp->window_clamp) 22911da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 22921da177e4SLinus Torvalds tp->advmss = dst_metric(dst, RTAX_ADVMSS); 22931da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 22941da177e4SLinus Torvalds 22951da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 22961da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 22971da177e4SLinus Torvalds &tp->rcv_wnd, 22981da177e4SLinus Torvalds &tp->window_clamp, 22991da177e4SLinus Torvalds sysctl_tcp_window_scaling, 23001da177e4SLinus Torvalds &rcv_wscale); 23011da177e4SLinus Torvalds 23021da177e4SLinus Torvalds tp->rx_opt.rcv_wscale = rcv_wscale; 23031da177e4SLinus Torvalds tp->rcv_ssthresh = tp->rcv_wnd; 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds sk->sk_err = 0; 23061da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 23071da177e4SLinus Torvalds tp->snd_wnd = 0; 23081da177e4SLinus Torvalds tcp_init_wl(tp, tp->write_seq, 0); 23091da177e4SLinus Torvalds tp->snd_una = tp->write_seq; 23101da177e4SLinus Torvalds tp->snd_sml = tp->write_seq; 23111da177e4SLinus Torvalds tp->rcv_nxt = 0; 23121da177e4SLinus Torvalds tp->rcv_wup = 0; 23131da177e4SLinus Torvalds tp->copied_seq = 0; 23141da177e4SLinus Torvalds 2315463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2316463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 23171da177e4SLinus Torvalds tcp_clear_retrans(tp); 23181da177e4SLinus Torvalds } 23191da177e4SLinus Torvalds
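/* Worked sketch of the advertised-MSS adjustment fed to
 * tcp_select_initial_window() above (hypothetical helper; numbers are
 * illustrative): with timestamps negotiated, the aligned option bytes
 * beyond the fixed header come out of every segment, e.g. 1460 -> 1448.
 */
static unsigned int example_window_mss(const struct tcp_sock *tp)
{
	unsigned int mss = tp->advmss;	/* e.g. 1460 from RTAX_ADVMSS */

	if (tp->rx_opt.ts_recent_stamp)
		mss -= tp->tcp_header_len - sizeof(struct tcphdr);
	return mss;
}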
23201da177e4SLinus Torvalds /* 23211da177e4SLinus Torvalds * Build a SYN and send it off. 23221da177e4SLinus Torvalds */ 23231da177e4SLinus Torvalds int tcp_connect(struct sock *sk) 23241da177e4SLinus Torvalds { 23251da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 23261da177e4SLinus Torvalds struct sk_buff *buff; 23271da177e4SLinus Torvalds 23281da177e4SLinus Torvalds tcp_connect_init(sk); 23291da177e4SLinus Torvalds 2330d179cd12SDavid S. Miller buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 23311da177e4SLinus Torvalds if (unlikely(buff == NULL)) 23321da177e4SLinus Torvalds return -ENOBUFS; 23331da177e4SLinus Torvalds 23341da177e4SLinus Torvalds /* Reserve space for headers. */ 23351da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 23361da177e4SLinus Torvalds 23371da177e4SLinus Torvalds TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; 23389e412ba7SIlpo Järvinen TCP_ECN_send_syn(sk, buff); 23391da177e4SLinus Torvalds TCP_SKB_CB(buff)->sacked = 0; 23407967168cSHerbert Xu skb_shinfo(buff)->gso_segs = 1; 23417967168cSHerbert Xu skb_shinfo(buff)->gso_size = 0; 23427967168cSHerbert Xu skb_shinfo(buff)->gso_type = 0; 23431da177e4SLinus Torvalds buff->csum = 0; 2344bd37a088SWei Yongjun tp->snd_nxt = tp->write_seq; 23451da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = tp->write_seq++; 23461da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = tp->write_seq; 23471da177e4SLinus Torvalds 23481da177e4SLinus Torvalds /* Send it off. */ 23491da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp; 23501da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(buff)->when; 23511da177e4SLinus Torvalds skb_header_release(buff); 2352fe067e8aSDavid S. Miller __tcp_add_write_queue_tail(sk, buff); 23531da177e4SLinus Torvalds sk_charge_skb(sk, buff); 23541da177e4SLinus Torvalds tp->packets_out += tcp_skb_pcount(buff); 2355dfb4b9dcSDavid S. Miller tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); 2356bd37a088SWei Yongjun 2357bd37a088SWei Yongjun /* We change tp->snd_nxt after the tcp_transmit_skb() call 2358bd37a088SWei Yongjun * in order to make this packet get counted in tcpOutSegs. 2359bd37a088SWei Yongjun */ 2360bd37a088SWei Yongjun tp->snd_nxt = tp->write_seq; 2361bd37a088SWei Yongjun tp->pushed_seq = tp->write_seq; 23621da177e4SLinus Torvalds TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 23631da177e4SLinus Torvalds 23641da177e4SLinus Torvalds /* Timer for repeating the SYN until an answer. */ 23653f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 23663f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 23671da177e4SLinus Torvalds return 0; 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds
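/* Sketch of the SYN retransmission schedule armed above (hypothetical
 * helper; the actual doubling happens in the retransmit timer code,
 * not here): each unanswered SYN doubles the RTO, capped at
 * TCP_RTO_MAX.
 */
static unsigned long example_syn_rto_after(unsigned long rto, int retries)
{
	while (retries-- > 0)
		rto = min_t(unsigned long, rto << 1, TCP_RTO_MAX);
	return rto;
}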
23701da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking 23711da177e4SLinus Torvalds * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 23721da177e4SLinus Torvalds * for details. 23731da177e4SLinus Torvalds */ 23741da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk) 23751da177e4SLinus Torvalds { 2376463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 2377463c84b9SArnaldo Carvalho de Melo int ato = icsk->icsk_ack.ato; 23781da177e4SLinus Torvalds unsigned long timeout; 23791da177e4SLinus Torvalds 23801da177e4SLinus Torvalds if (ato > TCP_DELACK_MIN) { 2381463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 23821da177e4SLinus Torvalds int max_ato = HZ/2; 23831da177e4SLinus Torvalds 2384463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 23851da177e4SLinus Torvalds max_ato = TCP_DELACK_MAX; 23861da177e4SLinus Torvalds 23871da177e4SLinus Torvalds /* Slow path, intersegment interval is "high". */ 23881da177e4SLinus Torvalds 23891da177e4SLinus Torvalds /* If some rtt estimate is known, use it to bound delayed ack. 2390463c84b9SArnaldo Carvalho de Melo * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 23911da177e4SLinus Torvalds * directly. 23921da177e4SLinus Torvalds */ 23931da177e4SLinus Torvalds if (tp->srtt) { 23941da177e4SLinus Torvalds int rtt = max(tp->srtt>>3, TCP_DELACK_MIN); 23951da177e4SLinus Torvalds 23961da177e4SLinus Torvalds if (rtt < max_ato) 23971da177e4SLinus Torvalds max_ato = rtt; 23981da177e4SLinus Torvalds } 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds ato = min(ato, max_ato); 24011da177e4SLinus Torvalds } 24021da177e4SLinus Torvalds 24031da177e4SLinus Torvalds /* Stay within the limit we were given */ 24041da177e4SLinus Torvalds timeout = jiffies + ato; 24051da177e4SLinus Torvalds 24061da177e4SLinus Torvalds /* Use the new timeout only if there wasn't an older one already. */ 2407463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 24081da177e4SLinus Torvalds /* If delack timer was blocked or is about to expire, 24091da177e4SLinus Torvalds * send ACK now. 24101da177e4SLinus Torvalds */ 2411463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.blocked || 2412463c84b9SArnaldo Carvalho de Melo time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 24131da177e4SLinus Torvalds tcp_send_ack(sk); 24141da177e4SLinus Torvalds return; 24151da177e4SLinus Torvalds } 24161da177e4SLinus Torvalds 2417463c84b9SArnaldo Carvalho de Melo if (!time_before(timeout, icsk->icsk_ack.timeout)) 2418463c84b9SArnaldo Carvalho de Melo timeout = icsk->icsk_ack.timeout; 24191da177e4SLinus Torvalds } 2420463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2421463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.timeout = timeout; 2422463c84b9SArnaldo Carvalho de Melo sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 24231da177e4SLinus Torvalds } 24241da177e4SLinus Torvalds 24251da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */ 24261da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk) 24271da177e4SLinus Torvalds { 24281da177e4SLinus Torvalds /* If we have been reset, we may not send again. */ 24291da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 24301da177e4SLinus Torvalds struct sk_buff *buff; 24311da177e4SLinus Torvalds 24321da177e4SLinus Torvalds /* We are not putting this on the write queue, so 24331da177e4SLinus Torvalds * tcp_transmit_skb() will set the ownership to this 24341da177e4SLinus Torvalds * sock. 24351da177e4SLinus Torvalds */ 24361da177e4SLinus Torvalds buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 24371da177e4SLinus Torvalds if (buff == NULL) { 2438463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 2439463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 24403f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 24413f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX); 24421da177e4SLinus Torvalds return; 24431da177e4SLinus Torvalds } 24441da177e4SLinus Torvalds 24451da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 24461da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 24471da177e4SLinus Torvalds buff->csum = 0; 24481da177e4SLinus Torvalds TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; 24491da177e4SLinus Torvalds TCP_SKB_CB(buff)->sacked = 0; 24507967168cSHerbert Xu skb_shinfo(buff)->gso_segs = 1; 24517967168cSHerbert Xu skb_shinfo(buff)->gso_size = 0; 24527967168cSHerbert Xu skb_shinfo(buff)->gso_type = 0; 24531da177e4SLinus Torvalds 24541da177e4SLinus Torvalds /* Send it off; this clears delayed ACKs for us. */ 24559e412ba7SIlpo Järvinen TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk); 24561da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp; 2457dfb4b9dcSDavid S. Miller tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 24581da177e4SLinus Torvalds } 24591da177e4SLinus Torvalds } 24601da177e4SLinus Torvalds
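/* Condensed restatement of the delayed-ACK coalescing rule used in
 * tcp_send_delayed_ack() above (hypothetical helper): an armed timer
 * is only ever moved earlier, and one that is blocked by the socket
 * lock or about to fire becomes an immediate tcp_send_ack().
 */
static int example_must_ack_now(const struct inet_connection_sock *icsk,
				int ato)
{
	return icsk->icsk_ack.blocked ||
	       time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2));
}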
24611da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence 24621da177e4SLinus Torvalds * number. It assumes the other end will try to ack it. 24631da177e4SLinus Torvalds * 24641da177e4SLinus Torvalds * Question: what should we do while in urgent mode? 24651da177e4SLinus Torvalds * 4.4BSD forces sending a single byte of data. We cannot send 24661da177e4SLinus Torvalds * out of window data, because we have SND.NXT==SND.MAX... 24671da177e4SLinus Torvalds * 24681da177e4SLinus Torvalds * Current solution: send TWO zero-length segments in urgent mode: 24691da177e4SLinus Torvalds * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another, 24701da177e4SLinus Torvalds * out-of-date with SND.UNA-1, to probe the window. 24711da177e4SLinus Torvalds */ 24721da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 24731da177e4SLinus Torvalds { 24741da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 24751da177e4SLinus Torvalds struct sk_buff *skb; 24761da177e4SLinus Torvalds 24771da177e4SLinus Torvalds /* We don't queue it, tcp_transmit_skb() sets ownership. */ 24781da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 24791da177e4SLinus Torvalds if (skb == NULL) 24801da177e4SLinus Torvalds return -1; 24811da177e4SLinus Torvalds 24821da177e4SLinus Torvalds /* Reserve space for headers and set control bits. */ 24831da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 24841da177e4SLinus Torvalds skb->csum = 0; 24851da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; 24861da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = urgent; 24877967168cSHerbert Xu skb_shinfo(skb)->gso_segs = 1; 24887967168cSHerbert Xu skb_shinfo(skb)->gso_size = 0; 24897967168cSHerbert Xu skb_shinfo(skb)->gso_type = 0; 24901da177e4SLinus Torvalds 24911da177e4SLinus Torvalds /* Use a previous sequence. This should cause the other 24921da177e4SLinus Torvalds * end to send an ack. Don't queue or clone SKB, just 24931da177e4SLinus Torvalds * send it. 24941da177e4SLinus Torvalds */ 24951da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1; 24961da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; 24971da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2498dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 24991da177e4SLinus Torvalds } 25001da177e4SLinus Torvalds
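/* Usage sketch for tcp_xmit_probe_skb() (hypothetical helper; this is
 * the same two-segment dance tcp_write_wakeup() performs below, minus
 * its urgent-pointer range check): in urgent mode a SND.UNA segment
 * first delivers the urgent pointer, then the stale SND.UNA-1 segment
 * probes the window.
 */
static int example_probe_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->urg_mode)
		tcp_xmit_probe_skb(sk, TCPCB_URG);
	return tcp_xmit_probe_skb(sk, 0);
}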
25011da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk) 25021da177e4SLinus Torvalds { 25031da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 25041da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 25051da177e4SLinus Torvalds struct sk_buff *skb; 25061da177e4SLinus Torvalds 2507fe067e8aSDavid S. Miller if ((skb = tcp_send_head(sk)) != NULL && 25081da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { 25091da177e4SLinus Torvalds int err; 25101da177e4SLinus Torvalds unsigned int mss = tcp_current_mss(sk, 0); 25111da177e4SLinus Torvalds unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq; 25121da177e4SLinus Torvalds 25131da177e4SLinus Torvalds if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 25141da177e4SLinus Torvalds tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 25151da177e4SLinus Torvalds 25161da177e4SLinus Torvalds /* We are probing the opening of a window 25171da177e4SLinus Torvalds * but the window size is != 0; this 25181da177e4SLinus Torvalds * must be a result of SWS avoidance (on the sender side). 25191da177e4SLinus Torvalds */ 25201da177e4SLinus Torvalds if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 25211da177e4SLinus Torvalds skb->len > mss) { 25221da177e4SLinus Torvalds seg_size = min(seg_size, mss); 25231da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2524846998aeSDavid S. Miller if (tcp_fragment(sk, skb, seg_size, mss)) 25251da177e4SLinus Torvalds return -1; 25261da177e4SLinus Torvalds } else if (!tcp_skb_pcount(skb)) 2527846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss); 25281da177e4SLinus Torvalds 25291da177e4SLinus Torvalds TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 25301da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2531dfb4b9dcSDavid S. Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 25321da177e4SLinus Torvalds if (!err) { 25339e412ba7SIlpo Järvinen update_send_head(sk, skb); 25341da177e4SLinus Torvalds } 25351da177e4SLinus Torvalds return err; 25361da177e4SLinus Torvalds } else { 25371da177e4SLinus Torvalds if (tp->urg_mode && 25381da177e4SLinus Torvalds between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF)) 25391da177e4SLinus Torvalds tcp_xmit_probe_skb(sk, TCPCB_URG); 25401da177e4SLinus Torvalds return tcp_xmit_probe_skb(sk, 0); 25411da177e4SLinus Torvalds } 25421da177e4SLinus Torvalds } 25431da177e4SLinus Torvalds return -1; 25441da177e4SLinus Torvalds } 25451da177e4SLinus Torvalds
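/* Sketch of the probe sizing rule in tcp_write_wakeup() above
 * (hypothetical helper): a probe segment may carry at most
 * min(bytes the peer's window still allows, one MSS), so an oversized
 * head-of-line skb gets fragmented down before transmission.
 */
static unsigned int example_probe_len(u32 snd_una, u32 snd_wnd, u32 seq,
				      unsigned int mss)
{
	u32 room = snd_una + snd_wnd - seq;	/* bytes still in window */

	return min_t(unsigned int, room, mss);
}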
25461da177e4SLinus Torvalds /* A window probe timeout has occurred. If the window is not closed, send 25471da177e4SLinus Torvalds * a partial packet, else a zero-window probe. 25481da177e4SLinus Torvalds */ 25491da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk) 25501da177e4SLinus Torvalds { 2551463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 25521da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 25531da177e4SLinus Torvalds int err; 25541da177e4SLinus Torvalds 25551da177e4SLinus Torvalds err = tcp_write_wakeup(sk); 25561da177e4SLinus Torvalds 2557fe067e8aSDavid S. Miller if (tp->packets_out || !tcp_send_head(sk)) { 25581da177e4SLinus Torvalds /* Cancel probe timer, if it is not required. */ 25596687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 2560463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 25611da177e4SLinus Torvalds return; 25621da177e4SLinus Torvalds } 25631da177e4SLinus Torvalds 25641da177e4SLinus Torvalds if (err <= 0) { 2565463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_backoff < sysctl_tcp_retries2) 2566463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff++; 25676687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out++; 2568463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 25693f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 25703f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 25711da177e4SLinus Torvalds } else { 25721da177e4SLinus Torvalds /* If packet was not sent due to local congestion, 25736687e988SArnaldo Carvalho de Melo * do not back off and do not remember icsk_probes_out. 25741da177e4SLinus Torvalds * Let local senders fight for local resources. 25751da177e4SLinus Torvalds * 25761da177e4SLinus Torvalds * Still use the accumulated backoff, though. 25771da177e4SLinus Torvalds */ 25786687e988SArnaldo Carvalho de Melo if (!icsk->icsk_probes_out) 25796687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 1; 2580463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2581463c84b9SArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, 25823f421baaSArnaldo Carvalho de Melo TCP_RESOURCE_PROBE_INTERVAL), 25833f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 25841da177e4SLinus Torvalds } 25851da177e4SLinus Torvalds } 25861da177e4SLinus Torvalds 25871da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_connect); 25881da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_make_synack); 25891da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_simple_retransmit); 25901da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sync_mss); 2591f4805edeSStephen Hemminger EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor); 25925d424d5aSJohn Heffner EXPORT_SYMBOL(tcp_mtup_init); 2593
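/* Sketch of the zero-window probe schedule armed by tcp_send_probe0()
 * above (hypothetical helper): on a failed probe the next interval is
 * min(icsk_rto << icsk_backoff, TCP_RTO_MAX), i.e. roughly doubling
 * until it pins at TCP_RTO_MAX; a send blocked by local congestion
 * keeps the backoff but caps the wait at TCP_RESOURCE_PROBE_INTERVAL.
 */
static unsigned long example_probe0_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return min_t(unsigned long,
		     icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX);
}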