/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
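/* Worked example of the decay loop above, read straight from the code:
 * one icsk_rto worth of idle time is consumed per halving, so with
 * snd_cwnd = 40, restart_cwnd = 10 and an idle delta just over 2*RTO
 * the window decays 40 -> 20 -> 10; the final max(cwnd, restart_cwnd)
 * guarantees we never restart below the restart window.
 */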
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further, place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
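/* Example: with TCP_INIT_CWND = 10 the default is 20 segments for any
 * mss up to 1460 bytes.  For a 9000-byte mss the computation above
 * yields (1460 * 20) / 9000 = 3 segments, keeping the initial window
 * roughly constant in bytes, and the max() enforces a floor of 2.
 */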
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
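/* Worked example of the wscale loop above: if the rmem limits and the
 * clamp allow space = 4 MB (4194304 bytes), the loop shifts until the
 * value fits in the 16-bit window field: 4194304 >> 6 = 65536 is still
 * too big, 4194304 >> 7 = 32768 fits, so rcv_wscale ends up as 7 and
 * the peer must scale our advertised window by 2^7.
 */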
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
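/* Example of the never-shrink rule above: with rcv_wscale = 7 the
 * advertised window moves in units of 1 << 7 = 128 bytes, so if
 * __tcp_select_window() asks for less than a cur_win of 10000 we keep
 * advertising ALIGN(10000, 128) = 10112 bytes instead of shrinking.
 */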
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Construct common control bits of a non-data skb.  If SYN/FIN is present,
 * auto-increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	shinfo->gso_segs = 1;
	shinfo->gso_size = 0;
	shinfo->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}
	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
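/* Wire-format example for the writer above: when OPTION_TS and
 * OPTION_SACK_ADVERTISE are both set, the first 32-bit word packs
 * SACK_PERM (kind 4, len 2) together with TIMESTAMP (kind 8, len 10)
 * as 0x0402080a, followed by the tsval and tsecr words; no NOP padding
 * is needed since 2 + 10 bytes is already 32-bit aligned.
 */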
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
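/* Option-space example for tcp_syn_options(): of the 40 bytes of
 * MAX_TCP_OPTION_SPACE, a typical SYN spends 4 on MSS, 12 on
 * timestamps (which also carry SACK_PERM) and 4 on window scaling,
 * leaving 20 bytes.  Since TCPOLEN_EXP_FASTOPEN_BASE is 4, even a
 * maximum 16-byte Fast Open cookie (need = 4 + 16 = 20) still fits.
 */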
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       struct tcp_md5sig_key **md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event an skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize has been subtracted from
 * sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
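/* tcp_release_cb() below claims every deferred-work bit in one shot:
 * it snapshots tsq_flags, clears the TCP_DEFERRED_ALL bits in the
 * copy, and loops on cmpxchg() until no other context raced with it.
 * E.g. if TCP_TSQ_DEFERRED and TCP_DELACK_TIMER_DEFERRED are both set,
 * a single successful cmpxchg() hands both to the caller while leaving
 * unrelated bits such as TSQ_QUEUED untouched.
 */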
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}
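/* Note on the accounting above: tcp_wfree() subtracts truesize - 1
 * rather than the full truesize, deliberately leaving one byte charged
 * to sk_wmem_alloc.  That residual charge acts as the reference that
 * keeps the socket alive until tcp_tasklet_func() drops it with
 * sk_free().
 */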
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	if (clone_it) {
		const struct sk_buff *fclone = skb + 1;

		skb_mstamp_get(&skb->skb_mstamp);

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source = inet->inet_sport;
	th->dest = inet->inet_dport;
	th->seq = htonl(tcb->seq);
	th->ack_seq = htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
				      tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window = htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window = htons(tcp_select_window(sk));
	}
	th->check = 0;
	th->urg_ptr = 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}
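/* Header-building example from tcp_transmit_skb() above: the write
 * through (((__be16 *)th) + 6) fills the 16-bit word that holds the
 * data offset, reserved bits and flags.  With the 20-byte base header
 * plus 12 bytes of timestamp options, tcp_header_size is 32, so
 * doff = 32 >> 2 = 8 and the word becomes htons((8 << 12) | flags).
 */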
/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));

	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		shinfo->gso_segs = 1;
		shinfo->gso_size = 0;
		shinfo->gso_type = 0;
	} else {
		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		shinfo->gso_size = mss_now;
		shinfo->gso_type = sk->sk_gso_type;
	}
}
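/* Example: a 4344-byte skb with mss_now = 1448 gets
 * gso_segs = DIV_ROUND_UP(4344, 1448) = 3 and gso_size = 1448, while
 * anything that fits in a single MSS keeps gso_size = 0 so the fast
 * path can skip the divide entirely.
 */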
/* When a modification to fackets_out becomes necessary, we need to check
 * whether the skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters.
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
Miller int nsize, old_factor; 1073b60b49eaSHerbert Xu int nlen; 10749ce01461SIlpo Järvinen u8 flags; 10751da177e4SLinus Torvalds 10762fceec13SIlpo Järvinen if (WARN_ON(len > skb->len)) 10772fceec13SIlpo Järvinen return -EINVAL; 10786a438bbeSStephen Hemminger 10791da177e4SLinus Torvalds nsize = skb_headlen(skb) - len; 10801da177e4SLinus Torvalds if (nsize < 0) 10811da177e4SLinus Torvalds nsize = 0; 10821da177e4SLinus Torvalds 1083c52e2421SEric Dumazet if (skb_unclone(skb, GFP_ATOMIC)) 10841da177e4SLinus Torvalds return -ENOMEM; 10851da177e4SLinus Torvalds 10861da177e4SLinus Torvalds /* Get a new skb... force flag on. */ 10871da177e4SLinus Torvalds buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 10881da177e4SLinus Torvalds if (buff == NULL) 10891da177e4SLinus Torvalds return -ENOMEM; /* We'll just try again later. */ 1090ef5cb973SHerbert Xu 10913ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 10923ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1093b60b49eaSHerbert Xu nlen = skb->len - len - nsize; 1094b60b49eaSHerbert Xu buff->truesize += nlen; 1095b60b49eaSHerbert Xu skb->truesize -= nlen; 10961da177e4SLinus Torvalds 10971da177e4SLinus Torvalds /* Correct the sequence numbers. */ 10981da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 10991da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 11001da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 11011da177e4SLinus Torvalds 11021da177e4SLinus Torvalds /* PSH and FIN should only be set in the second packet. */ 11034de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 11044de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 11054de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1106e14c3cafSHerbert Xu TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 11071da177e4SLinus Torvalds 110884fa7933SPatrick McHardy if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 11091da177e4SLinus Torvalds /* Copy and checksum data tail into the new buffer. */ 1110056834d9SIlpo Järvinen buff->csum = csum_partial_copy_nocheck(skb->data + len, 1111056834d9SIlpo Järvinen skb_put(buff, nsize), 11121da177e4SLinus Torvalds nsize, 0); 11131da177e4SLinus Torvalds 11141da177e4SLinus Torvalds skb_trim(skb, len); 11151da177e4SLinus Torvalds 11161da177e4SLinus Torvalds skb->csum = csum_block_sub(skb->csum, buff->csum, len); 11171da177e4SLinus Torvalds } else { 111884fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 11191da177e4SLinus Torvalds skb_split(skb, buff, len); 11201da177e4SLinus Torvalds } 11211da177e4SLinus Torvalds 11221da177e4SLinus Torvalds buff->ip_summed = skb->ip_summed; 11231da177e4SLinus Torvalds 11241da177e4SLinus Torvalds /* Looks stupid, but our code really uses the 'when' field of 11251da177e4SLinus Torvalds * skbs which it never sent before. --ANK 11261da177e4SLinus Torvalds */ 11271da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 1128a61bbcf2SPatrick McHardy buff->tstamp = skb->tstamp; 11291da177e4SLinus Torvalds 11306475be16SDavid S. Miller old_factor = tcp_skb_pcount(skb); 11316475be16SDavid S. Miller 11321da177e4SLinus Torvalds /* Fix up tso_factor for both original and new SKB. */ 1133846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1134846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, buff, mss_now); 11351da177e4SLinus Torvalds 11366475be16SDavid S. Miller /* If this packet has been sent out already, we must 11376475be16SDavid S.
Miller * adjust the various packet counters. 11386475be16SDavid S. Miller */ 1139cf0b450cSHerbert Xu if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 11406475be16SDavid S. Miller int diff = old_factor - tcp_skb_pcount(skb) - 11416475be16SDavid S. Miller tcp_skb_pcount(buff); 11421da177e4SLinus Torvalds 1143797108d1SIlpo Järvinen if (diff) 1144797108d1SIlpo Järvinen tcp_adjust_pcount(sk, skb, diff); 11451da177e4SLinus Torvalds } 11461da177e4SLinus Torvalds 11471da177e4SLinus Torvalds /* Link BUFF into the send queue. */ 1148f44b5271SDavid S. Miller skb_header_release(buff); 1149fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 11501da177e4SLinus Torvalds 11511da177e4SLinus Torvalds return 0; 11521da177e4SLinus Torvalds } 11531da177e4SLinus Torvalds 11541da177e4SLinus Torvalds /* This is similar to __pskb_pull_tail() (it will go to core/skbuff.c 11551da177e4SLinus Torvalds * eventually). The difference is that pulled data is not copied, but 11561da177e4SLinus Torvalds * immediately discarded. 11571da177e4SLinus Torvalds */ 1158f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len) 11591da177e4SLinus Torvalds { 11607b7fc97aSEric Dumazet struct skb_shared_info *shinfo; 11611da177e4SLinus Torvalds int i, k, eat; 11621da177e4SLinus Torvalds 11634fa48bf3SEric Dumazet eat = min_t(int, len, skb_headlen(skb)); 11644fa48bf3SEric Dumazet if (eat) { 11654fa48bf3SEric Dumazet __skb_pull(skb, eat); 11664fa48bf3SEric Dumazet len -= eat; 11674fa48bf3SEric Dumazet if (!len) 11684fa48bf3SEric Dumazet return; 11694fa48bf3SEric Dumazet } 11701da177e4SLinus Torvalds eat = len; 11711da177e4SLinus Torvalds k = 0; 11727b7fc97aSEric Dumazet shinfo = skb_shinfo(skb); 11737b7fc97aSEric Dumazet for (i = 0; i < shinfo->nr_frags; i++) { 11747b7fc97aSEric Dumazet int size = skb_frag_size(&shinfo->frags[i]); 11759e903e08SEric Dumazet 11769e903e08SEric Dumazet if (size <= eat) { 1177aff65da0SIan Campbell skb_frag_unref(skb, i); 11789e903e08SEric Dumazet eat -= size; 11791da177e4SLinus Torvalds } else { 11807b7fc97aSEric Dumazet shinfo->frags[k] = shinfo->frags[i]; 11811da177e4SLinus Torvalds if (eat) { 11827b7fc97aSEric Dumazet shinfo->frags[k].page_offset += eat; 11837b7fc97aSEric Dumazet skb_frag_size_sub(&shinfo->frags[k], eat); 11841da177e4SLinus Torvalds eat = 0; 11851da177e4SLinus Torvalds } 11861da177e4SLinus Torvalds k++; 11871da177e4SLinus Torvalds } 11881da177e4SLinus Torvalds } 11897b7fc97aSEric Dumazet shinfo->nr_frags = k; 11901da177e4SLinus Torvalds 119127a884dcSArnaldo Carvalho de Melo skb_reset_tail_pointer(skb); 11921da177e4SLinus Torvalds skb->data_len -= len; 11931da177e4SLinus Torvalds skb->len = skb->data_len; 11941da177e4SLinus Torvalds } 11951da177e4SLinus Torvalds 119667edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue.
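 * Typically invoked from retransmit handling when tp->snd_una has already
 * advanced into this skb, so that only the un-acked tail is sent again.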
*/ 11971da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 11981da177e4SLinus Torvalds { 119914bbd6a5SPravin B Shelar if (skb_unclone(skb, GFP_ATOMIC)) 12001da177e4SLinus Torvalds return -ENOMEM; 12011da177e4SLinus Torvalds 12024fa48bf3SEric Dumazet __pskb_trim_head(skb, len); 12031da177e4SLinus Torvalds 12041da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq += len; 120584fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 12061da177e4SLinus Torvalds 12071da177e4SLinus Torvalds skb->truesize -= len; 12081da177e4SLinus Torvalds sk->sk_wmem_queued -= len; 12093ab224beSHideo Aoki sk_mem_uncharge(sk, len); 12101da177e4SLinus Torvalds sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 12111da177e4SLinus Torvalds 12125b35e1e6SNeal Cardwell /* Any change of skb->len requires recalculation of tso factor. */ 12131da177e4SLinus Torvalds if (tcp_skb_pcount(skb) > 1) 12145b35e1e6SNeal Cardwell tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); 12151da177e4SLinus Torvalds 12161da177e4SLinus Torvalds return 0; 12171da177e4SLinus Torvalds } 12181da177e4SLinus Torvalds 12191b63edd6SYuchung Cheng /* Calculate MSS, not accounting for any TCP options. */ 12201b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 12215d424d5aSJohn Heffner { 1222cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1223cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 12245d424d5aSJohn Heffner int mss_now; 12255d424d5aSJohn Heffner 12265d424d5aSJohn Heffner /* Calculate base mss without TCP options: 12275d424d5aSJohn Heffner It is MMS_S - sizeof(tcphdr) of rfc1122 12285d424d5aSJohn Heffner */ 12295d424d5aSJohn Heffner mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 12305d424d5aSJohn Heffner 123167469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 123267469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 123367469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 123467469601SEric Dumazet 123567469601SEric Dumazet if (dst && dst_allfrag(dst)) 123667469601SEric Dumazet mss_now -= icsk->icsk_af_ops->net_frag_header_len; 123767469601SEric Dumazet } 123867469601SEric Dumazet 12395d424d5aSJohn Heffner /* Clamp it (mss_clamp does not include tcp options) */ 12405d424d5aSJohn Heffner if (mss_now > tp->rx_opt.mss_clamp) 12415d424d5aSJohn Heffner mss_now = tp->rx_opt.mss_clamp; 12425d424d5aSJohn Heffner 12435d424d5aSJohn Heffner /* Now subtract optional transport overhead */ 12445d424d5aSJohn Heffner mss_now -= icsk->icsk_ext_hdr_len; 12455d424d5aSJohn Heffner 12465d424d5aSJohn Heffner /* Then reserve room for full set of TCP options and 8 bytes of data */ 12475d424d5aSJohn Heffner if (mss_now < 48) 12485d424d5aSJohn Heffner mss_now = 48; 12495d424d5aSJohn Heffner return mss_now; 12505d424d5aSJohn Heffner } 12515d424d5aSJohn Heffner 12521b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here.
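 * For example, on a plain IPv4 path with no IP options, a 1500-byte PMTU
 * gives __tcp_mtu_to_mss() = 1500 - 20 - 20 = 1460; with the usual 12 bytes
 * reserved for the timestamp option, tcp_mtu_to_mss() below returns 1448
 * (illustrative figures).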
*/ 12531b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu) 12541b63edd6SYuchung Cheng { 12551b63edd6SYuchung Cheng /* Subtract TCP options size, not including SACKs */ 12561b63edd6SYuchung Cheng return __tcp_mtu_to_mss(sk, pmtu) - 12571b63edd6SYuchung Cheng (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 12581b63edd6SYuchung Cheng } 12591b63edd6SYuchung Cheng 12605d424d5aSJohn Heffner /* Inverse of above */ 126167469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss) 12625d424d5aSJohn Heffner { 1263cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1264cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 12655d424d5aSJohn Heffner int mtu; 12665d424d5aSJohn Heffner 12675d424d5aSJohn Heffner mtu = mss + 12685d424d5aSJohn Heffner tp->tcp_header_len + 12695d424d5aSJohn Heffner icsk->icsk_ext_hdr_len + 12705d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 12715d424d5aSJohn Heffner 127267469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 127367469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 127467469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 127567469601SEric Dumazet 127667469601SEric Dumazet if (dst && dst_allfrag(dst)) 127767469601SEric Dumazet mtu += icsk->icsk_af_ops->net_frag_header_len; 127867469601SEric Dumazet } 12795d424d5aSJohn Heffner return mtu; 12805d424d5aSJohn Heffner } 12815d424d5aSJohn Heffner 128267edfef7SAndi Kleen /* MTU probing init per socket */ 12835d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk) 12845d424d5aSJohn Heffner { 12855d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 12865d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 12875d424d5aSJohn Heffner 12885d424d5aSJohn Heffner icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 12895d424d5aSJohn Heffner icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 12905d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 12915d424d5aSJohn Heffner icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 12925d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 12935d424d5aSJohn Heffner } 12944bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init); 12955d424d5aSJohn Heffner 12961da177e4SLinus Torvalds /* This function synchronizes snd mss to the current pmtu/exthdr set. 12971da177e4SLinus Torvalds 12981da177e4SLinus Torvalds tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT 12991da177e4SLinus Torvalds account for TCP options, but includes only the bare TCP header. 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds tp->rx_opt.mss_clamp is the mss negotiated at connection setup. 1302caa20d9aSStephen Hemminger It is the minimum of user_mss and the mss received with the SYN. 13031da177e4SLinus Torvalds It also does not include TCP options. 13041da177e4SLinus Torvalds 1305d83d8461SArnaldo Carvalho de Melo inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function. 13061da177e4SLinus Torvalds 13071da177e4SLinus Torvalds tp->mss_cache is the current effective sending mss, accounting for 13081da177e4SLinus Torvalds all tcp options except for SACKs. It is evaluated, 13091da177e4SLinus Torvalds taking into account the current pmtu, but never exceeds 13101da177e4SLinus Torvalds tp->rx_opt.mss_clamp. 13111da177e4SLinus Torvalds 13121da177e4SLinus Torvalds NOTE1. rfc1122 clearly states that advertised MSS 13131da177e4SLinus Torvalds DOES NOT include either tcp or ip options.
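   For example (illustrative figures): if the peer's SYN advertised MSS 1460
   and the application set TCP_MAXSEG to 1200, mss_clamp becomes 1200 (neither
   value counting options), and with timestamps in use mss_cache comes out at
   1200 - 12 = 1188.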
13141da177e4SLinus Torvalds 1315d83d8461SArnaldo Carvalho de Melo NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1316d83d8461SArnaldo Carvalho de Melo are READ ONLY outside this function. --ANK (980731) 13171da177e4SLinus Torvalds */ 13181da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 13191da177e4SLinus Torvalds { 13201da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1321d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 13225d424d5aSJohn Heffner int mss_now; 13231da177e4SLinus Torvalds 13245d424d5aSJohn Heffner if (icsk->icsk_mtup.search_high > pmtu) 13255d424d5aSJohn Heffner icsk->icsk_mtup.search_high = pmtu; 13261da177e4SLinus Torvalds 13275d424d5aSJohn Heffner mss_now = tcp_mtu_to_mss(sk, pmtu); 1328409d22b4SIlpo Järvinen mss_now = tcp_bound_to_half_wnd(tp, mss_now); 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds /* And store cached results */ 1331d83d8461SArnaldo Carvalho de Melo icsk->icsk_pmtu_cookie = pmtu; 13325d424d5aSJohn Heffner if (icsk->icsk_mtup.enabled) 13335d424d5aSJohn Heffner mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1334c1b4a7e6SDavid S. Miller tp->mss_cache = mss_now; 13351da177e4SLinus Torvalds 13361da177e4SLinus Torvalds return mss_now; 13371da177e4SLinus Torvalds } 13384bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss); 13391da177e4SLinus Torvalds 13401da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options, 13411da177e4SLinus Torvalds * and even PMTU discovery events into account. 13421da177e4SLinus Torvalds */ 13430c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk) 13441da177e4SLinus Torvalds { 1345cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1346cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 1347c1b4a7e6SDavid S. Miller u32 mss_now; 134895c96174SEric Dumazet unsigned int header_len; 134933ad798cSAdam Langley struct tcp_out_options opts; 135033ad798cSAdam Langley struct tcp_md5sig_key *md5; 13511da177e4SLinus Torvalds 1352c1b4a7e6SDavid S. Miller mss_now = tp->mss_cache; 1353c1b4a7e6SDavid S. Miller 13541da177e4SLinus Torvalds if (dst) { 13551da177e4SLinus Torvalds u32 mtu = dst_mtu(dst); 1356d83d8461SArnaldo Carvalho de Melo if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 13571da177e4SLinus Torvalds mss_now = tcp_sync_mss(sk, mtu); 13581da177e4SLinus Torvalds } 13591da177e4SLinus Torvalds 136033ad798cSAdam Langley header_len = tcp_established_options(sk, NULL, &opts, &md5) + 136133ad798cSAdam Langley sizeof(struct tcphdr); 136233ad798cSAdam Langley /* The mss_cache is sized based on tp->tcp_header_len, which assumes 136333ad798cSAdam Langley * some common options. If this is an odd packet (because we have SACK 136433ad798cSAdam Langley * blocks etc) then our calculated header_len will be different, and 136533ad798cSAdam Langley * we have to adjust mss_now correspondingly */ 136633ad798cSAdam Langley if (header_len != tp->tcp_header_len) { 136733ad798cSAdam Langley int delta = (int) header_len - tp->tcp_header_len; 136833ad798cSAdam Langley mss_now -= delta; 136933ad798cSAdam Langley } 1370cfb6eeb4SYOSHIFUJI Hideaki 13711da177e4SLinus Torvalds return mss_now; 13721da177e4SLinus Torvalds } 13731da177e4SLinus Torvalds 1374a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */ 13759e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk) 1376a762a980SDavid S. 
Miller { 13779e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1378a762a980SDavid S. Miller 1379d436d686SIlpo Järvinen if (tp->packets_out >= tp->snd_cwnd) { 1380a762a980SDavid S. Miller /* Network is fed fully. */ 1381a762a980SDavid S. Miller tp->snd_cwnd_used = 0; 1382a762a980SDavid S. Miller tp->snd_cwnd_stamp = tcp_time_stamp; 1383a762a980SDavid S. Miller } else { 1384a762a980SDavid S. Miller /* Network is starved. */ 1385a762a980SDavid S. Miller if (tp->packets_out > tp->snd_cwnd_used) 1386a762a980SDavid S. Miller tp->snd_cwnd_used = tp->packets_out; 1387a762a980SDavid S. Miller 138815d33c07SDavid S. Miller if (sysctl_tcp_slow_start_after_idle && 138915d33c07SDavid S. Miller (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1390a762a980SDavid S. Miller tcp_cwnd_application_limited(sk); 1391a762a980SDavid S. Miller } 1392a762a980SDavid S. Miller } 1393a762a980SDavid S. Miller 1394d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */ 1395d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp) 1396d4589926SEric Dumazet { 1397d4589926SEric Dumazet return after(tp->snd_sml, tp->snd_una) && 1398d4589926SEric Dumazet !after(tp->snd_sml, tp->snd_nxt); 1399d4589926SEric Dumazet } 1400d4589926SEric Dumazet 1401d4589926SEric Dumazet /* Update snd_sml if this skb is under mss. 1402d4589926SEric Dumazet * Note that a TSO packet might end with a sub-mss segment. 1403d4589926SEric Dumazet * The test is really: 1404d4589926SEric Dumazet * if ((skb->len % mss) != 0) 1405d4589926SEric Dumazet * tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 1406d4589926SEric Dumazet * But we can avoid doing the divide again given we already have 1407d4589926SEric Dumazet * skb_pcount = skb->len / mss_now 14080e3a4803SIlpo Järvinen */ 1409d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, 1410d4589926SEric Dumazet const struct sk_buff *skb) 1411d4589926SEric Dumazet { 1412d4589926SEric Dumazet if (skb->len < tcp_skb_pcount(skb) * mss_now) 1413d4589926SEric Dumazet tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 1414d4589926SEric Dumazet } 1415d4589926SEric Dumazet 1416d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules: 1417d4589926SEric Dumazet * 1. It is full sized. (provided by caller in %partial bool) 1418d4589926SEric Dumazet * 2. Or it contains FIN. (already checked by caller) 1419d4589926SEric Dumazet * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1420d4589926SEric Dumazet * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1421d4589926SEric Dumazet * With Minshall's modification: all sent small packets are ACKed. 1422d4589926SEric Dumazet */ 1423d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, 1424d4589926SEric Dumazet unsigned int mss_now, int nonagle) 1425d4589926SEric Dumazet { 1426d4589926SEric Dumazet return partial && 1427d4589926SEric Dumazet ((nonagle & TCP_NAGLE_CORK) || 1428d4589926SEric Dumazet (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1429d4589926SEric Dumazet } 1430d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */ 1431d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, 1432d4589926SEric Dumazet const struct sk_buff *skb, 1433d4589926SEric Dumazet unsigned int mss_now, 1434d4589926SEric Dumazet unsigned int max_segs, 1435d4589926SEric Dumazet int nonagle) 1436c1b4a7e6SDavid S.
Miller { 1437cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1438d4589926SEric Dumazet u32 partial, needed, window, max_len; 1439c1b4a7e6SDavid S. Miller 144090840defSIlpo Järvinen window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 14411485348dSBen Hutchings max_len = mss_now * max_segs; 14420e3a4803SIlpo Järvinen 14431485348dSBen Hutchings if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 14441485348dSBen Hutchings return max_len; 14450e3a4803SIlpo Järvinen 14465ea3a748SIlpo Järvinen needed = min(skb->len, window); 14475ea3a748SIlpo Järvinen 14481485348dSBen Hutchings if (max_len <= needed) 14491485348dSBen Hutchings return max_len; 14500e3a4803SIlpo Järvinen 1451d4589926SEric Dumazet partial = needed % mss_now; 1452d4589926SEric Dumazet /* If last segment is not a full MSS, check if Nagle rules allow us 1453d4589926SEric Dumazet * to include this last segment in this skb. 1454d4589926SEric Dumazet * Otherwise, we'll split the skb at last MSS boundary 1455d4589926SEric Dumazet */ 1456d4589926SEric Dumazet if (tcp_nagle_check(partial != 0, tp, mss_now, nonagle)) 1457d4589926SEric Dumazet return needed - partial; 1458d4589926SEric Dumazet 1459d4589926SEric Dumazet return needed; 1460c1b4a7e6SDavid S. Miller } 1461c1b4a7e6SDavid S. Miller 1462c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the 1463c1b4a7e6SDavid S. Miller * congestion window rules? If so, return how many segments are allowed. 1464c1b4a7e6SDavid S. Miller */ 1465cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1466cf533ea5SEric Dumazet const struct sk_buff *skb) 1467c1b4a7e6SDavid S. Miller { 1468c1b4a7e6SDavid S. Miller u32 in_flight, cwnd; 1469c1b4a7e6SDavid S. Miller 1470c1b4a7e6SDavid S. Miller /* Don't be strict about the congestion window for the final FIN. */ 14714de075e0SEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 14724de075e0SEric Dumazet tcp_skb_pcount(skb) == 1) 1473c1b4a7e6SDavid S. Miller return 1; 1474c1b4a7e6SDavid S. Miller 1475c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1476c1b4a7e6SDavid S. Miller cwnd = tp->snd_cwnd; 1477c1b4a7e6SDavid S. Miller if (in_flight < cwnd) 1478c1b4a7e6SDavid S. Miller return (cwnd - in_flight); 1479c1b4a7e6SDavid S. Miller 1480c1b4a7e6SDavid S. Miller return 0; 1481c1b4a7e6SDavid S. Miller } 1482c1b4a7e6SDavid S. Miller 1483b595076aSUwe Kleine-König /* Initialize TSO state of a skb. 148467edfef7SAndi Kleen * This must be invoked the first time we consider transmitting 1485c1b4a7e6SDavid S. Miller * SKB onto the wire. 1486c1b4a7e6SDavid S. Miller */ 1487cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, 1488056834d9SIlpo Järvinen unsigned int mss_now) 1489c1b4a7e6SDavid S. Miller { 1490c1b4a7e6SDavid S. Miller int tso_segs = tcp_skb_pcount(skb); 1491c1b4a7e6SDavid S. Miller 1492f8269a49SIlpo Järvinen if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1493846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1494c1b4a7e6SDavid S. Miller tso_segs = tcp_skb_pcount(skb); 1495c1b4a7e6SDavid S. Miller } 1496c1b4a7e6SDavid S. Miller return tso_segs; 1497c1b4a7e6SDavid S. Miller } 1498c1b4a7e6SDavid S. Miller 1499c1b4a7e6SDavid S. Miller 1500a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be 1501c1b4a7e6SDavid S. Miller * sent now. 1502c1b4a7e6SDavid S. 
Miller */ 1503a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1504c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1505c1b4a7e6SDavid S. Miller { 1506c1b4a7e6SDavid S. Miller /* Nagle rule does not apply to frames which sit in the middle of the 1507c1b4a7e6SDavid S. Miller * write_queue (they have no chance to get new data). 1508c1b4a7e6SDavid S. Miller * 1509c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 1510c1b4a7e6SDavid S. Miller * argument based upon the location of SKB in the send queue. 1511c1b4a7e6SDavid S. Miller */ 1512c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 1513a2a385d6SEric Dumazet return true; 1514c1b4a7e6SDavid S. Miller 15159b44190dSYuchung Cheng /* Don't use the nagle rule for urgent data (or for the final FIN). */ 15169b44190dSYuchung Cheng if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1517a2a385d6SEric Dumazet return true; 1518c1b4a7e6SDavid S. Miller 1519d4589926SEric Dumazet if (!tcp_nagle_check(skb->len < cur_mss, tp, cur_mss, nonagle)) 1520a2a385d6SEric Dumazet return true; 1521c1b4a7e6SDavid S. Miller 1522a2a385d6SEric Dumazet return false; 1523c1b4a7e6SDavid S. Miller } 1524c1b4a7e6SDavid S. Miller 1525c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 1526a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 1527a2a385d6SEric Dumazet const struct sk_buff *skb, 1528056834d9SIlpo Järvinen unsigned int cur_mss) 1529c1b4a7e6SDavid S. Miller { 1530c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1531c1b4a7e6SDavid S. Miller 1532c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 1533c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1534c1b4a7e6SDavid S. Miller 153590840defSIlpo Järvinen return !after(end_seq, tcp_wnd_end(tp)); 1536c1b4a7e6SDavid S. Miller } 1537c1b4a7e6SDavid S. Miller 1538fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1539c1b4a7e6SDavid S. Miller * should be put on the wire right now. If so, it returns the number of 1540c1b4a7e6SDavid S. Miller * packets allowed by the congestion window. 1541c1b4a7e6SDavid S. Miller */ 1542cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1543c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1544c1b4a7e6SDavid S. Miller { 1545cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1546c1b4a7e6SDavid S. Miller unsigned int cwnd_quota; 1547c1b4a7e6SDavid S. Miller 1548846998aeSDavid S. Miller tcp_init_tso_segs(sk, skb, cur_mss); 1549c1b4a7e6SDavid S. Miller 1550c1b4a7e6SDavid S. Miller if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1551c1b4a7e6SDavid S. Miller return 0; 1552c1b4a7e6SDavid S. Miller 1553c1b4a7e6SDavid S. Miller cwnd_quota = tcp_cwnd_test(tp, skb); 1554056834d9SIlpo Järvinen if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1555c1b4a7e6SDavid S. Miller cwnd_quota = 0; 1556c1b4a7e6SDavid S. Miller 1557c1b4a7e6SDavid S. Miller return cwnd_quota; 1558c1b4a7e6SDavid S. Miller } 1559c1b4a7e6SDavid S. Miller 156067edfef7SAndi Kleen /* Test if sending is allowed right now. */ 1561a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk) 1562c1b4a7e6SDavid S. Miller { 1563cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1564fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1565c1b4a7e6SDavid S.
Miller 1566a02cec21SEric Dumazet return skb && 15670c54b85fSIlpo Järvinen tcp_snd_test(sk, skb, tcp_current_mss(sk), 1568c1b4a7e6SDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1569a02cec21SEric Dumazet tp->nonagle : TCP_NAGLE_PUSH)); 1570c1b4a7e6SDavid S. Miller } 1571c1b4a7e6SDavid S. Miller 1572c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1573c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 1574c1b4a7e6SDavid S. Miller * tcp_fragment() except that it may make several kinds of assumptions 1575c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 1576c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 1577c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 1578c1b4a7e6SDavid S. Miller */ 1579056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1580c4ead4c5SEric Dumazet unsigned int mss_now, gfp_t gfp) 1581c1b4a7e6SDavid S. Miller { 1582c1b4a7e6SDavid S. Miller struct sk_buff *buff; 1583c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 15849ce01461SIlpo Järvinen u8 flags; 1585c1b4a7e6SDavid S. Miller 1586c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 1587c8ac3774SHerbert Xu if (skb->len != skb->data_len) 1588c8ac3774SHerbert Xu return tcp_fragment(sk, skb, len, mss_now); 1589c1b4a7e6SDavid S. Miller 1590c4ead4c5SEric Dumazet buff = sk_stream_alloc_skb(sk, 0, gfp); 1591c1b4a7e6SDavid S. Miller if (unlikely(buff == NULL)) 1592c1b4a7e6SDavid S. Miller return -ENOMEM; 1593c1b4a7e6SDavid S. Miller 15943ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 15953ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1596b60b49eaSHerbert Xu buff->truesize += nlen; 1597c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 1598c1b4a7e6SDavid S. Miller 1599c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 1600c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1601c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1602c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1603c1b4a7e6SDavid S. Miller 1604c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 16054de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 16064de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 16074de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1608c1b4a7e6SDavid S. Miller 1609c1b4a7e6SDavid S. Miller /* This packet was never sent out yet, so no SACK bits. */ 1610c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->sacked = 0; 1611c1b4a7e6SDavid S. Miller 161284fa7933SPatrick McHardy buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1613c1b4a7e6SDavid S. Miller skb_split(skb, buff, len); 1614c1b4a7e6SDavid S. Miller 1615c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 1616846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1617846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, buff, mss_now); 1618c1b4a7e6SDavid S. Miller 1619c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 1620c1b4a7e6SDavid S. Miller skb_header_release(buff); 1621fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 1622c1b4a7e6SDavid S. Miller 1623c1b4a7e6SDavid S. Miller return 0; 1624c1b4a7e6SDavid S. Miller } 1625c1b4a7e6SDavid S. 
Miller 1626c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount 1627c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 1628c1b4a7e6SDavid S. Miller * 1629c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 1630c1b4a7e6SDavid S. Miller */ 1631a2a385d6SEric Dumazet static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1632c1b4a7e6SDavid S. Miller { 16339e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 16346687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1635c1b4a7e6SDavid S. Miller u32 send_win, cong_win, limit, in_flight; 1636ad9f4f50SEric Dumazet int win_divisor; 1637c1b4a7e6SDavid S. Miller 16384de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1639ae8064acSJohn Heffner goto send_now; 1640c1b4a7e6SDavid S. Miller 16416687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 1642ae8064acSJohn Heffner goto send_now; 1643ae8064acSJohn Heffner 1644ae8064acSJohn Heffner /* Defer for less than two clock ticks (tso_deferred keeps jiffies in its upper 31 bits, with bit 0 set as an 'active' flag; the shifts below compare the 31-bit stamps). */ 1645bd515c3eSIlpo Järvinen if (tp->tso_deferred && 1646a2acde07SIlpo Järvinen (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1647ae8064acSJohn Heffner goto send_now; 1648908a75c1SDavid S. Miller 1649c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1650c1b4a7e6SDavid S. Miller 1651056834d9SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1652c1b4a7e6SDavid S. Miller 165390840defSIlpo Järvinen send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1654c1b4a7e6SDavid S. Miller 1655c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1656c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1657c1b4a7e6SDavid S. Miller 1658c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1659c1b4a7e6SDavid S. Miller 1660ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 16611485348dSBen Hutchings if (limit >= min_t(unsigned int, sk->sk_gso_max_size, 166295bd09ebSEric Dumazet tp->xmit_size_goal_segs * tp->mss_cache)) 1663ae8064acSJohn Heffner goto send_now; 1664ba244fe9SDavid S. Miller 166562ad2761SIlpo Järvinen /* An skb in the middle of the queue won't get more data; fully sendable already? */ 166662ad2761SIlpo Järvinen if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 166762ad2761SIlpo Järvinen goto send_now; 166862ad2761SIlpo Järvinen 1669ad9f4f50SEric Dumazet win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1670ad9f4f50SEric Dumazet if (win_divisor) { 1671c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1672c1b4a7e6SDavid S. Miller 1673c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1674c1b4a7e6SDavid S. Miller * just use it. 1675c1b4a7e6SDavid S. Miller */ 1676ad9f4f50SEric Dumazet chunk /= win_divisor; 1677c1b4a7e6SDavid S. Miller if (limit >= chunk) 1678ae8064acSJohn Heffner goto send_now; 1679c1b4a7e6SDavid S. Miller } else { 1680c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1681c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1682c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 1683c1b4a7e6SDavid S. Miller * then send now. 1684c1b4a7e6SDavid S. Miller */ 16856b5a5c0dSNeal Cardwell if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1686ae8064acSJohn Heffner goto send_now; 1687c1b4a7e6SDavid S.
Miller } 1688c1b4a7e6SDavid S. Miller 1689f4541d60SEric Dumazet /* Ok, it looks like it is advisable to defer. 1690f4541d60SEric Dumazet * Do not rearm the timer if it is already set, so as not to break TCP ACK clocking. 1691f4541d60SEric Dumazet */ 1692f4541d60SEric Dumazet if (!tp->tso_deferred) 1693ae8064acSJohn Heffner tp->tso_deferred = 1 | (jiffies << 1); 1694ae8064acSJohn Heffner 1695a2a385d6SEric Dumazet return true; 1696ae8064acSJohn Heffner 1697ae8064acSJohn Heffner send_now: 1698ae8064acSJohn Heffner tp->tso_deferred = 0; 1699a2a385d6SEric Dumazet return false; 1700c1b4a7e6SDavid S. Miller } 1701c1b4a7e6SDavid S. Miller 17025d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 170367edfef7SAndi Kleen * MTU probing regularly attempts to increase the path MTU by 170467edfef7SAndi Kleen * deliberately sending larger packets. This discovers routing 170567edfef7SAndi Kleen * changes that result in larger path MTUs. 170667edfef7SAndi Kleen * 17075d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available), 17085d424d5aSJohn Heffner * 1 if a probe was sent, 1709056834d9SIlpo Järvinen * -1 otherwise 1710056834d9SIlpo Järvinen */ 17115d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk) 17125d424d5aSJohn Heffner { 17135d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 17145d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 17155d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next; 17165d424d5aSJohn Heffner int len; 17175d424d5aSJohn Heffner int probe_size; 171891cc17c0SIlpo Järvinen int size_needed; 17185d424d5aSJohn Heffner int copy; 17205d424d5aSJohn Heffner int mss_now; 17215d424d5aSJohn Heffner 17225d424d5aSJohn Heffner /* Not currently probing/verifying, 17235d424d5aSJohn Heffner * not in recovery, 17245d424d5aSJohn Heffner * have enough cwnd, and 17255d424d5aSJohn Heffner * not SACKing (the variable headers throw things off) */ 17265d424d5aSJohn Heffner if (!icsk->icsk_mtup.enabled || 17275d424d5aSJohn Heffner icsk->icsk_mtup.probe_size || 17285d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 17295d424d5aSJohn Heffner tp->snd_cwnd < 11 || 1730cabeccbdSIlpo Järvinen tp->rx_opt.num_sacks || tp->rx_opt.dsack) 17315d424d5aSJohn Heffner return -1; 17325d424d5aSJohn Heffner 17335d424d5aSJohn Heffner /* Very simple search strategy: just double the MSS. */ 17340c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk); 17355d424d5aSJohn Heffner probe_size = 2 * tp->mss_cache; 173691cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 17375d424d5aSJohn Heffner if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 17385d424d5aSJohn Heffner /* TODO: set timer for probe_converge_event */ 17395d424d5aSJohn Heffner return -1; 17405d424d5aSJohn Heffner } 17415d424d5aSJohn Heffner 17425d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */ 17437f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed) 17445d424d5aSJohn Heffner return -1; 17455d424d5aSJohn Heffner 174691cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed) 17475d424d5aSJohn Heffner return -1; 174890840defSIlpo Järvinen if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 17495d424d5aSJohn Heffner return 0; 17505d424d5aSJohn Heffner 1751d67c58e9SIlpo Järvinen /* Do we need to wait to drain cwnd?
With none in flight, don't stall */ 1752d67c58e9SIlpo Järvinen if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1753d67c58e9SIlpo Järvinen if (!tcp_packets_in_flight(tp)) 17545d424d5aSJohn Heffner return -1; 17555d424d5aSJohn Heffner else 17565d424d5aSJohn Heffner return 0; 17575d424d5aSJohn Heffner } 17585d424d5aSJohn Heffner 17595d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */ 17605d424d5aSJohn Heffner if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 17615d424d5aSJohn Heffner return -1; 17623ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 17633ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 17645d424d5aSJohn Heffner 1765fe067e8aSDavid S. Miller skb = tcp_send_head(sk); 17665d424d5aSJohn Heffner 17675d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 17685d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 17694de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 17705d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 17715d424d5aSJohn Heffner nskb->csum = 0; 177284fa7933SPatrick McHardy nskb->ip_summed = skb->ip_summed; 17735d424d5aSJohn Heffner 177450c4817eSIlpo Järvinen tcp_insert_write_queue_before(nskb, skb, sk); 177550c4817eSIlpo Järvinen 17765d424d5aSJohn Heffner len = 0; 1777234b6860SIlpo Järvinen tcp_for_write_queue_from_safe(skb, next, sk) { 17785d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 17795d424d5aSJohn Heffner if (nskb->ip_summed) 17805d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 17815d424d5aSJohn Heffner else 17825d424d5aSJohn Heffner nskb->csum = skb_copy_and_csum_bits(skb, 0, 1783056834d9SIlpo Järvinen skb_put(nskb, copy), 1784056834d9SIlpo Järvinen copy, nskb->csum); 17855d424d5aSJohn Heffner 17865d424d5aSJohn Heffner if (skb->len <= copy) { 17875d424d5aSJohn Heffner /* We've eaten all the data from this skb. 17885d424d5aSJohn Heffner * Throw it away. */ 17894de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1790fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 17913ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 17925d424d5aSJohn Heffner } else { 17934de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 1794a3433f35SChangli Gao ~(TCPHDR_FIN|TCPHDR_PSH); 17955d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 17965d424d5aSJohn Heffner skb_pull(skb, copy); 179784fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 1798056834d9SIlpo Järvinen skb->csum = csum_partial(skb->data, 1799056834d9SIlpo Järvinen skb->len, 0); 18005d424d5aSJohn Heffner } else { 18015d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 18025d424d5aSJohn Heffner tcp_set_skb_tso_segs(sk, skb, mss_now); 18035d424d5aSJohn Heffner } 18045d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 18055d424d5aSJohn Heffner } 18065d424d5aSJohn Heffner 18075d424d5aSJohn Heffner len += copy; 1808234b6860SIlpo Järvinen 1809234b6860SIlpo Järvinen if (len >= probe_size) 1810234b6860SIlpo Järvinen break; 18115d424d5aSJohn Heffner } 18125d424d5aSJohn Heffner tcp_init_tso_segs(sk, nskb, nskb->len); 18135d424d5aSJohn Heffner 18145d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 18155d424d5aSJohn Heffner * be resegmented into mss-sized pieces by tcp_write_xmit(). 
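 * (The probe itself is a single skb of probe_size = 2 * tp->mss_cache bytes,
 * e.g. 2 * 1448 = 2896 bytes on a typical timestamped IPv4 connection, sent
 * as one over-sized segment to test whether the path accepts a larger MTU.)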
*/ 18165d424d5aSJohn Heffner TCP_SKB_CB(nskb)->when = tcp_time_stamp; 18175d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 18185d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 18195d424d5aSJohn Heffner * effectively two packets. */ 18205d424d5aSJohn Heffner tp->snd_cwnd--; 182166f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, nskb); 18225d424d5aSJohn Heffner 18235d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 18240e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 18250e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 18265d424d5aSJohn Heffner 18275d424d5aSJohn Heffner return 1; 18285d424d5aSJohn Heffner } 18295d424d5aSJohn Heffner 18305d424d5aSJohn Heffner return -1; 18315d424d5aSJohn Heffner } 18325d424d5aSJohn Heffner 18331da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 18341da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote 18351da177e4SLinus Torvalds * window for us. 18361da177e4SLinus Torvalds * 1837f8269a49SIlpo Järvinen * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1838f8269a49SIlpo Järvinen * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1839f8269a49SIlpo Järvinen * account rare use of URG, this is not a big flaw. 1840f8269a49SIlpo Järvinen * 18416ba8a3b1SNandita Dukkipati * Send at most one packet when push_one > 0. Temporarily ignore 18426ba8a3b1SNandita Dukkipati * cwnd limit to force at most one packet out when push_one == 2. 18436ba8a3b1SNandita Dukkipati 1844a2a385d6SEric Dumazet * Returns true, if no segments are in flight and we have queued segments, 1845a2a385d6SEric Dumazet * but cannot send anything now because of SWS or another problem. 18461da177e4SLinus Torvalds */ 1847a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1848d5dd9175SIlpo Järvinen int push_one, gfp_t gfp) 18491da177e4SLinus Torvalds { 18501da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 185192df7b51SDavid S. Miller struct sk_buff *skb; 1852c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 1853c1b4a7e6SDavid S. Miller int cwnd_quota; 18545d424d5aSJohn Heffner int result; 18551da177e4SLinus Torvalds 1856c1b4a7e6SDavid S. Miller sent_pkts = 0; 18575d424d5aSJohn Heffner 1858d5dd9175SIlpo Järvinen if (!push_one) { 18595d424d5aSJohn Heffner /* Do MTU probing. */ 1860d5dd9175SIlpo Järvinen result = tcp_mtu_probe(sk); 1861d5dd9175SIlpo Järvinen if (!result) { 1862a2a385d6SEric Dumazet return false; 18635d424d5aSJohn Heffner } else if (result > 0) { 18645d424d5aSJohn Heffner sent_pkts = 1; 18655d424d5aSJohn Heffner } 1866d5dd9175SIlpo Järvinen } 18675d424d5aSJohn Heffner 1868fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 1869c8ac3774SHerbert Xu unsigned int limit; 1870c8ac3774SHerbert Xu 1871b68e9f85SHerbert Xu tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1872c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1873c1b4a7e6SDavid S. Miller 1874ec342325SAndrew Vagin if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) 1875ec342325SAndrew Vagin goto repair; /* Skip network transmission */ 1876ec342325SAndrew Vagin 1877b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 18786ba8a3b1SNandita Dukkipati if (!cwnd_quota) { 18796ba8a3b1SNandita Dukkipati if (push_one == 2) 18806ba8a3b1SNandita Dukkipati /* Force out a loss probe pkt. 
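 * (push_one == 2 is used only by tcp_send_loss_probe(), which must get one
 * segment onto the wire even when cwnd is exhausted.)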
*/ 18816ba8a3b1SNandita Dukkipati cwnd_quota = 1; 18826ba8a3b1SNandita Dukkipati else 1883b68e9f85SHerbert Xu break; 18846ba8a3b1SNandita Dukkipati } 1885b68e9f85SHerbert Xu 1886b68e9f85SHerbert Xu if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1887b68e9f85SHerbert Xu break; 1888b68e9f85SHerbert Xu 1889c1b4a7e6SDavid S. Miller if (tso_segs == 1) { 1890aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1891aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1892aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 1893aa93466bSDavid S. Miller break; 1894c1b4a7e6SDavid S. Miller } else { 1895d5dd9175SIlpo Järvinen if (!push_one && tcp_tso_should_defer(sk, skb)) 1896aa93466bSDavid S. Miller break; 1897c1b4a7e6SDavid S. Miller } 1898aa93466bSDavid S. Miller 1899c9eeec26SEric Dumazet /* TCP Small Queues: 1900c9eeec26SEric Dumazet * Limit the number of packets in qdisc/devices to two packets or ~1 ms of data (sk_pacing_rate is in bytes/sec, so rate >> 10 below is roughly 1 ms worth). 1901c9eeec26SEric Dumazet * This allows for: 1902c9eeec26SEric Dumazet * - better RTT estimation and ACK scheduling 1903c9eeec26SEric Dumazet * - faster recovery 1904c9eeec26SEric Dumazet * - high rates 190598e09386SEric Dumazet * Alas, some drivers / subsystems require a fair amount 190698e09386SEric Dumazet * of queued bytes to ensure line rate. 190798e09386SEric Dumazet * One example is wifi aggregation (802.11 AMPDU) 190846d3ceabSEric Dumazet */ 190998e09386SEric Dumazet limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes, 191098e09386SEric Dumazet sk->sk_pacing_rate >> 10); 1911c9eeec26SEric Dumazet 1912c9eeec26SEric Dumazet if (atomic_read(&sk->sk_wmem_alloc) > limit) { 191346d3ceabSEric Dumazet set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1914bf06200eSJohn Ogness /* It is possible TX completion already happened 1915bf06200eSJohn Ogness * before we set TSQ_THROTTLED, so we must 1916bf06200eSJohn Ogness * test the condition again. 1917bf06200eSJohn Ogness * We abuse smp_mb__after_clear_bit() because 1918bf06200eSJohn Ogness * there is no smp_mb__after_set_bit() yet 1919bf06200eSJohn Ogness */ 1920bf06200eSJohn Ogness smp_mb__after_clear_bit(); 1921bf06200eSJohn Ogness if (atomic_read(&sk->sk_wmem_alloc) > limit) 192246d3ceabSEric Dumazet break; 192346d3ceabSEric Dumazet } 1924c9eeec26SEric Dumazet 1925c8ac3774SHerbert Xu limit = mss_now; 1926f8269a49SIlpo Järvinen if (tso_segs > 1 && !tcp_urg_mode(tp)) 19270e3a4803SIlpo Järvinen limit = tcp_mss_split_point(sk, skb, mss_now, 19281485348dSBen Hutchings min_t(unsigned int, 19291485348dSBen Hutchings cwnd_quota, 1930d4589926SEric Dumazet sk->sk_gso_max_segs), 1931d4589926SEric Dumazet nonagle); 1932c8ac3774SHerbert Xu 1933c8ac3774SHerbert Xu if (skb->len > limit && 1934c4ead4c5SEric Dumazet unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 19351da177e4SLinus Torvalds break; 19361da177e4SLinus Torvalds 19371da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 1938c1b4a7e6SDavid S. Miller 1939d5dd9175SIlpo Järvinen if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 19401da177e4SLinus Torvalds break; 19411da177e4SLinus Torvalds 1942ec342325SAndrew Vagin repair: 19431da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 19441da177e4SLinus Torvalds * This call will increment packets_out.
19451da177e4SLinus Torvalds */ 194666f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 19471da177e4SLinus Torvalds 19481da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 1949a262f0cdSNandita Dukkipati sent_pkts += tcp_skb_pcount(skb); 1950d5dd9175SIlpo Järvinen 1951d5dd9175SIlpo Järvinen if (push_one) 1952d5dd9175SIlpo Järvinen break; 19531da177e4SLinus Torvalds } 19541da177e4SLinus Torvalds 1955aa93466bSDavid S. Miller if (likely(sent_pkts)) { 1956684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 1957684bad11SYuchung Cheng tp->prr_out += sent_pkts; 19586ba8a3b1SNandita Dukkipati 19596ba8a3b1SNandita Dukkipati /* Send one loss probe per tail loss episode. */ 19606ba8a3b1SNandita Dukkipati if (push_one != 2) 19616ba8a3b1SNandita Dukkipati tcp_schedule_loss_probe(sk); 19629e412ba7SIlpo Järvinen tcp_cwnd_validate(sk); 1963a2a385d6SEric Dumazet return false; 19641da177e4SLinus Torvalds } 19656ba8a3b1SNandita Dukkipati return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 19666ba8a3b1SNandita Dukkipati } 19676ba8a3b1SNandita Dukkipati 19686ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk) 19696ba8a3b1SNandita Dukkipati { 19706ba8a3b1SNandita Dukkipati struct inet_connection_sock *icsk = inet_csk(sk); 19716ba8a3b1SNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 19726ba8a3b1SNandita Dukkipati u32 timeout, tlp_time_stamp, rto_time_stamp; 1973*740b0f18SEric Dumazet u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 19746ba8a3b1SNandita Dukkipati 19756ba8a3b1SNandita Dukkipati if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) 19766ba8a3b1SNandita Dukkipati return false; 19776ba8a3b1SNandita Dukkipati /* No consecutive loss probes. */ 19786ba8a3b1SNandita Dukkipati if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 19796ba8a3b1SNandita Dukkipati tcp_rearm_rto(sk); 19806ba8a3b1SNandita Dukkipati return false; 19816ba8a3b1SNandita Dukkipati } 19826ba8a3b1SNandita Dukkipati /* Don't do any loss probe on a Fast Open connection before 3WHS 19836ba8a3b1SNandita Dukkipati * finishes. 19846ba8a3b1SNandita Dukkipati */ 19856ba8a3b1SNandita Dukkipati if (sk->sk_state == TCP_SYN_RECV) 19866ba8a3b1SNandita Dukkipati return false; 19876ba8a3b1SNandita Dukkipati 19886ba8a3b1SNandita Dukkipati /* TLP is only scheduled when next timer event is RTO. */ 19896ba8a3b1SNandita Dukkipati if (icsk->icsk_pending != ICSK_TIME_RETRANS) 19906ba8a3b1SNandita Dukkipati return false; 19916ba8a3b1SNandita Dukkipati 19926ba8a3b1SNandita Dukkipati /* Schedule a loss probe in 2*RTT for SACK-capable connections 19936ba8a3b1SNandita Dukkipati * in Open state that are either cwnd-limited or application-limited. 19946ba8a3b1SNandita Dukkipati */ 1995*740b0f18SEric Dumazet if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out || 19966ba8a3b1SNandita Dukkipati !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 19976ba8a3b1SNandita Dukkipati return false; 19986ba8a3b1SNandita Dukkipati 19996ba8a3b1SNandita Dukkipati if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 20006ba8a3b1SNandita Dukkipati tcp_send_head(sk)) 20016ba8a3b1SNandita Dukkipati return false; 20026ba8a3b1SNandita Dukkipati 20036ba8a3b1SNandita Dukkipati /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 20046ba8a3b1SNandita Dukkipati * for delayed ack when there's one outstanding packet.
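 * For example, with srtt = 40 ms the base timeout below is 2 * 40 = 80 ms;
 * with exactly one packet in flight it is raised to 40 + 20 + TCP_DELACK_MAX
 * (200 ms) = 260 ms, and it is never allowed below 10 ms.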
20056ba8a3b1SNandita Dukkipati */ 20066ba8a3b1SNandita Dukkipati timeout = rtt << 1; 20076ba8a3b1SNandita Dukkipati if (tp->packets_out == 1) 20086ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, 20096ba8a3b1SNandita Dukkipati (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 20106ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 20116ba8a3b1SNandita Dukkipati 20126ba8a3b1SNandita Dukkipati /* If RTO is shorter, just schedule TLP in its place. */ 20136ba8a3b1SNandita Dukkipati tlp_time_stamp = tcp_time_stamp + timeout; 20146ba8a3b1SNandita Dukkipati rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 20156ba8a3b1SNandita Dukkipati if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 20166ba8a3b1SNandita Dukkipati s32 delta = rto_time_stamp - tcp_time_stamp; 20176ba8a3b1SNandita Dukkipati if (delta > 0) 20186ba8a3b1SNandita Dukkipati timeout = delta; 20196ba8a3b1SNandita Dukkipati } 20206ba8a3b1SNandita Dukkipati 20216ba8a3b1SNandita Dukkipati inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 20226ba8a3b1SNandita Dukkipati TCP_RTO_MAX); 20236ba8a3b1SNandita Dukkipati return true; 20246ba8a3b1SNandita Dukkipati } 20256ba8a3b1SNandita Dukkipati 20266ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else 20276ba8a3b1SNandita Dukkipati * retransmit the last segment. 20286ba8a3b1SNandita Dukkipati */ 20296ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk) 20306ba8a3b1SNandita Dukkipati { 20319b717a8dSNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 20326ba8a3b1SNandita Dukkipati struct sk_buff *skb; 20336ba8a3b1SNandita Dukkipati int pcount; 20346ba8a3b1SNandita Dukkipati int mss = tcp_current_mss(sk); 20356ba8a3b1SNandita Dukkipati int err = -1; 20366ba8a3b1SNandita Dukkipati 20376ba8a3b1SNandita Dukkipati if (tcp_send_head(sk) != NULL) { 20386ba8a3b1SNandita Dukkipati err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 20396ba8a3b1SNandita Dukkipati goto rearm_timer; 20406ba8a3b1SNandita Dukkipati } 20416ba8a3b1SNandita Dukkipati 20429b717a8dSNandita Dukkipati /* At most one outstanding TLP retransmission. */ 20439b717a8dSNandita Dukkipati if (tp->tlp_high_seq) 20449b717a8dSNandita Dukkipati goto rearm_timer; 20459b717a8dSNandita Dukkipati 20466ba8a3b1SNandita Dukkipati /* Retransmit last segment. */ 20476ba8a3b1SNandita Dukkipati skb = tcp_write_queue_tail(sk); 20486ba8a3b1SNandita Dukkipati if (WARN_ON(!skb)) 20496ba8a3b1SNandita Dukkipati goto rearm_timer; 20506ba8a3b1SNandita Dukkipati 20516ba8a3b1SNandita Dukkipati pcount = tcp_skb_pcount(skb); 20526ba8a3b1SNandita Dukkipati if (WARN_ON(!pcount)) 20536ba8a3b1SNandita Dukkipati goto rearm_timer; 20546ba8a3b1SNandita Dukkipati 20556ba8a3b1SNandita Dukkipati if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 20566ba8a3b1SNandita Dukkipati if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 20576ba8a3b1SNandita Dukkipati goto rearm_timer; 20586ba8a3b1SNandita Dukkipati skb = tcp_write_queue_tail(sk); 20596ba8a3b1SNandita Dukkipati } 20606ba8a3b1SNandita Dukkipati 20616ba8a3b1SNandita Dukkipati if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 20626ba8a3b1SNandita Dukkipati goto rearm_timer; 20636ba8a3b1SNandita Dukkipati 20646ba8a3b1SNandita Dukkipati /* Probe with zero data doesn't trigger fast recovery. */ 20656ba8a3b1SNandita Dukkipati if (skb->len > 0) 20666ba8a3b1SNandita Dukkipati err = __tcp_retransmit_skb(sk, skb); 20676ba8a3b1SNandita Dukkipati 20689b717a8dSNandita Dukkipati /* Record snd_nxt for loss detection. 
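 * (An ACK that advances past tlp_high_seq lets the ACK processing path
 * detect the end of the tail-loss episode and judge whether the probe
 * itself repaired a real loss.)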
*/ 20699b717a8dSNandita Dukkipati if (likely(!err)) 20709b717a8dSNandita Dukkipati tp->tlp_high_seq = tp->snd_nxt; 20719b717a8dSNandita Dukkipati 20726ba8a3b1SNandita Dukkipati rearm_timer: 20736ba8a3b1SNandita Dukkipati inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 20746ba8a3b1SNandita Dukkipati inet_csk(sk)->icsk_rto, 20756ba8a3b1SNandita Dukkipati TCP_RTO_MAX); 20766ba8a3b1SNandita Dukkipati 20776ba8a3b1SNandita Dukkipati if (likely(!err)) 20786ba8a3b1SNandita Dukkipati NET_INC_STATS_BH(sock_net(sk), 20796ba8a3b1SNandita Dukkipati LINUX_MIB_TCPLOSSPROBES); 20801da177e4SLinus Torvalds } 20811da177e4SLinus Torvalds 2082a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 2083a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 2084a762a980SDavid S. Miller * The socket must be locked by the caller. 2085a762a980SDavid S. Miller */ 20869e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 20879e412ba7SIlpo Järvinen int nonagle) 2088a762a980SDavid S. Miller { 2089726e07a8SIlpo Järvinen /* If we are closed, the bytes will have to remain here. 2090726e07a8SIlpo Järvinen * In time closedown will finish, we empty the write queue and 2091726e07a8SIlpo Järvinen * all will be happy. 2092726e07a8SIlpo Järvinen */ 2093726e07a8SIlpo Järvinen if (unlikely(sk->sk_state == TCP_CLOSE)) 2094726e07a8SIlpo Järvinen return; 2095726e07a8SIlpo Järvinen 209699a1dec7SMel Gorman if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 209799a1dec7SMel Gorman sk_gfp_atomic(sk, GFP_ATOMIC))) 20989e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 2099a762a980SDavid S. Miller } 2100a762a980SDavid S. Miller 2101c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 2102c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 2103c1b4a7e6SDavid S. Miller */ 2104c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 2105c1b4a7e6SDavid S. Miller { 2106fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 2107c1b4a7e6SDavid S. Miller 2108c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 2109c1b4a7e6SDavid S. Miller 2110d5dd9175SIlpo Järvinen tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2111c1b4a7e6SDavid S. Miller } 2112c1b4a7e6SDavid S. Miller 21131da177e4SLinus Torvalds /* This function returns the amount that we can raise the 21141da177e4SLinus Torvalds * usable window based on the following constraints 21151da177e4SLinus Torvalds * 21161da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 21171da177e4SLinus Torvalds * 2. We limit memory per socket 21181da177e4SLinus Torvalds * 21191da177e4SLinus Torvalds * RFC 1122: 21201da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 21211da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 21221da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 21231da177e4SLinus Torvalds * 21241da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 21251da177e4SLinus Torvalds * it at least MSS bytes. 21261da177e4SLinus Torvalds * 21271da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 21281da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 
21291da177e4SLinus Torvalds * 21301da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 21311da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 21321da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 21331da177e4SLinus Torvalds * window to always advance by a single byte. 21341da177e4SLinus Torvalds * 21351da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 21361da177e4SLinus Torvalds * then this will not be a problem. 21371da177e4SLinus Torvalds * 21381da177e4SLinus Torvalds * BSD seems to make the following compromise: 21391da177e4SLinus Torvalds * 21401da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 21411da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 21421da177e4SLinus Torvalds * then set the window to 0. 21431da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 21441da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 21451da177e4SLinus Torvalds * and from being larger than the largest representable value. 21461da177e4SLinus Torvalds * 21471da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 21481da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 21491da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 21501da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 21511da177e4SLinus Torvalds * because the pipeline is full. 21521da177e4SLinus Torvalds * 21531da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 21541da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 21551da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 21561da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 21571da177e4SLinus Torvalds * of having a fixed window size at almost all times. 21581da177e4SLinus Torvalds * 21591da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 21601da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 21611da177e4SLinus Torvalds * 21621da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 21631da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 21641da177e4SLinus Torvalds */ 21651da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 21661da177e4SLinus Torvalds { 2167463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 21681da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2169caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 21701da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 21711da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 21721da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 21731da177e4SLinus Torvalds * fluctuations. 
--SAW 1998/11/1
21741da177e4SLinus Torvalds */
2175463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss;
21761da177e4SLinus Torvalds int free_space = tcp_space(sk);
217786c1a045SFlorian Westphal int allowed_space = tcp_full_space(sk);
217886c1a045SFlorian Westphal int full_space = min_t(int, tp->window_clamp, allowed_space);
21791da177e4SLinus Torvalds int window;
21801da177e4SLinus Torvalds
21811da177e4SLinus Torvalds if (mss > full_space)
21821da177e4SLinus Torvalds mss = full_space;
21831da177e4SLinus Torvalds
2184b92edbe0SEric Dumazet if (free_space < (full_space >> 1)) {
2185463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0;
21861da177e4SLinus Torvalds
2187180d8cd9SGlauber Costa if (sk_under_memory_pressure(sk))
2188056834d9SIlpo Järvinen tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2189056834d9SIlpo Järvinen 4U * tp->advmss);
21901da177e4SLinus Torvalds
219186c1a045SFlorian Westphal /* free_space might become our new window, make sure we don't
219286c1a045SFlorian Westphal * increase it due to wscale.
219386c1a045SFlorian Westphal */
219486c1a045SFlorian Westphal free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
219586c1a045SFlorian Westphal
219686c1a045SFlorian Westphal /* if free space is less than mss estimate, or is below 1/16th
219786c1a045SFlorian Westphal * of the maximum allowed, try to move to zero-window, else
219886c1a045SFlorian Westphal * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
219986c1a045SFlorian Westphal * new incoming data is dropped due to memory limits.
220086c1a045SFlorian Westphal * With large window, mss test triggers way too late in order
220186c1a045SFlorian Westphal * to announce zero window in time before rmem limit kicks in.
220286c1a045SFlorian Westphal */
220386c1a045SFlorian Westphal if (free_space < (allowed_space >> 4) || free_space < mss)
22041da177e4SLinus Torvalds return 0;
22051da177e4SLinus Torvalds }
22061da177e4SLinus Torvalds
22071da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh)
22081da177e4SLinus Torvalds free_space = tp->rcv_ssthresh;
22091da177e4SLinus Torvalds
22101da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the
22111da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway.
22121da177e4SLinus Torvalds */
22131da177e4SLinus Torvalds window = tp->rcv_wnd;
22141da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) {
22151da177e4SLinus Torvalds window = free_space;
22161da177e4SLinus Torvalds
22171da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away.
22181da177e4SLinus Torvalds * Important case: prevent zero window announcement if
22191da177e4SLinus Torvalds * 1<<rcv_wscale > mss.
22201da177e4SLinus Torvalds */
22211da177e4SLinus Torvalds if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
22221da177e4SLinus Torvalds window = (((window >> tp->rx_opt.rcv_wscale) + 1)
22231da177e4SLinus Torvalds << tp->rx_opt.rcv_wscale);
22241da177e4SLinus Torvalds } else {
22251da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss.
22261da177e4SLinus Torvalds * Window clamp already applied above.
22271da177e4SLinus Torvalds * If our current window offering is within 1 mss of the
22281da177e4SLinus Torvalds * free space we just keep it. This prevents the divide
22291da177e4SLinus Torvalds * and multiply from happening most of the time.
22301da177e4SLinus Torvalds * We also don't do any window rounding when the free space
22311da177e4SLinus Torvalds * is too small.
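 *
 * (Editorial aside, not part of the kernel source: a worked example of the
 * rounding below. Without window scaling, free_space = 10000 and mss = 1460
 * give window = (10000 / 1460) * 1460 = 8760, a clean multiple of the MSS.
 * In the zero-window branch above, with allowed_space = 65536 the
 * advertisement drops to zero once free_space falls below 65536 / 16 = 4096
 * bytes or below one MSS.)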
22321da177e4SLinus Torvalds */ 22331da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 22341da177e4SLinus Torvalds window = (free_space / mss) * mss; 223584565070SJohn Heffner else if (mss == full_space && 2236b92edbe0SEric Dumazet free_space > window + (full_space >> 1)) 223784565070SJohn Heffner window = free_space; 22381da177e4SLinus Torvalds } 22391da177e4SLinus Torvalds 22401da177e4SLinus Torvalds return window; 22411da177e4SLinus Torvalds } 22421da177e4SLinus Torvalds 22434a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */ 22444a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 22451da177e4SLinus Torvalds { 22461da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2247fe067e8aSDavid S. Miller struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 2248058dc334SIlpo Järvinen int skb_size, next_skb_size; 22491da177e4SLinus Torvalds 2250058dc334SIlpo Järvinen skb_size = skb->len; 2251058dc334SIlpo Järvinen next_skb_size = next_skb->len; 22521da177e4SLinus Torvalds 2253058dc334SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 22541da177e4SLinus Torvalds 22556859d494SIlpo Järvinen tcp_highest_sack_combine(sk, next_skb, skb); 2256a6963a6bSIlpo Järvinen 2257fe067e8aSDavid S. Miller tcp_unlink_write_queue(next_skb, sk); 22581da177e4SLinus Torvalds 2259058dc334SIlpo Järvinen skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 22601a4e2d09SArnaldo Carvalho de Melo next_skb_size); 22611da177e4SLinus Torvalds 226252d570aaSJarek Poplawski if (next_skb->ip_summed == CHECKSUM_PARTIAL) 226352d570aaSJarek Poplawski skb->ip_summed = CHECKSUM_PARTIAL; 22641da177e4SLinus Torvalds 226584fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 22661da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 22671da177e4SLinus Torvalds 22681da177e4SLinus Torvalds /* Update sequence range on original skb. */ 22691da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 22701da177e4SLinus Torvalds 2271e6c7d085SIlpo Järvinen /* Merge over control information. This moves PSH/FIN etc. over */ 22724de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 22731da177e4SLinus Torvalds 22741da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 22751da177e4SLinus Torvalds * packet counting does not break. 22761da177e4SLinus Torvalds */ 22774828e7f4SIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 2278b7689205SIlpo Järvinen 2279b7689205SIlpo Järvinen /* changed transmit queue under us so clear hints */ 2280ef9da47cSIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 2281ef9da47cSIlpo Järvinen if (next_skb == tp->retransmit_skb_hint) 2282ef9da47cSIlpo Järvinen tp->retransmit_skb_hint = skb; 2283b7689205SIlpo Järvinen 2284797108d1SIlpo Järvinen tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2285797108d1SIlpo Järvinen 22863ab224beSHideo Aoki sk_wmem_free_skb(sk, next_skb); 22871da177e4SLinus Torvalds } 22881da177e4SLinus Torvalds 228967edfef7SAndi Kleen /* Check if coalescing SKBs is legal. 
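 * (Editorial aside, not part of the kernel source: e.g. two adjacent
 * 700-byte segments awaiting retransmission can be merged into one
 * 1400-byte segment when the MSS is 1460, saving a TCP/IP header and a
 * packet on the wire; the checks below refuse cloned, paged, SACKed, or
 * multi-pcount skbs.)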
*/
2290a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
22914a17fc3aSIlpo Järvinen {
22924a17fc3aSIlpo Järvinen if (tcp_skb_pcount(skb) > 1)
2293a2a385d6SEric Dumazet return false;
22944a17fc3aSIlpo Järvinen /* TODO: SACK collapsing could be used to remove this condition */
22954a17fc3aSIlpo Järvinen if (skb_shinfo(skb)->nr_frags != 0)
2296a2a385d6SEric Dumazet return false;
22974a17fc3aSIlpo Järvinen if (skb_cloned(skb))
2298a2a385d6SEric Dumazet return false;
22994a17fc3aSIlpo Järvinen if (skb == tcp_send_head(sk))
2300a2a385d6SEric Dumazet return false;
23014a17fc3aSIlpo Järvinen /* Some heuristics for collapsing over SACK'd could be invented */
23024a17fc3aSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2303a2a385d6SEric Dumazet return false;
23044a17fc3aSIlpo Järvinen
2305a2a385d6SEric Dumazet return true;
23064a17fc3aSIlpo Järvinen }
23074a17fc3aSIlpo Järvinen
230867edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
230967edfef7SAndi Kleen * fewer packets on the wire. This is only done on retransmission.
231067edfef7SAndi Kleen */
23114a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
23124a17fc3aSIlpo Järvinen int space)
23134a17fc3aSIlpo Järvinen {
23144a17fc3aSIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk);
23154a17fc3aSIlpo Järvinen struct sk_buff *skb = to, *tmp;
2316a2a385d6SEric Dumazet bool first = true;
23174a17fc3aSIlpo Järvinen
23184a17fc3aSIlpo Järvinen if (!sysctl_tcp_retrans_collapse)
23194a17fc3aSIlpo Järvinen return;
23204de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
23214a17fc3aSIlpo Järvinen return;
23224a17fc3aSIlpo Järvinen
23234a17fc3aSIlpo Järvinen tcp_for_write_queue_from_safe(skb, tmp, sk) {
23244a17fc3aSIlpo Järvinen if (!tcp_can_collapse(sk, skb))
23254a17fc3aSIlpo Järvinen break;
23264a17fc3aSIlpo Järvinen
23274a17fc3aSIlpo Järvinen space -= skb->len;
23284a17fc3aSIlpo Järvinen
23294a17fc3aSIlpo Järvinen if (first) {
2330a2a385d6SEric Dumazet first = false;
23314a17fc3aSIlpo Järvinen continue;
23324a17fc3aSIlpo Järvinen }
23334a17fc3aSIlpo Järvinen
23344a17fc3aSIlpo Järvinen if (space < 0)
23354a17fc3aSIlpo Järvinen break;
23364a17fc3aSIlpo Järvinen /* Punt if not enough space exists in the first SKB for
23374a17fc3aSIlpo Järvinen * the data in the second
23384a17fc3aSIlpo Järvinen */
2339a21d4572SEric Dumazet if (skb->len > skb_availroom(to))
23404a17fc3aSIlpo Järvinen break;
23414a17fc3aSIlpo Järvinen
23424a17fc3aSIlpo Järvinen if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
23434a17fc3aSIlpo Järvinen break;
23444a17fc3aSIlpo Järvinen
23454a17fc3aSIlpo Järvinen tcp_collapse_retrans(sk, to);
23464a17fc3aSIlpo Järvinen }
23474a17fc3aSIlpo Järvinen }
23484a17fc3aSIlpo Järvinen
23491da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue
23501da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an
23511da177e4SLinus Torvalds * error occurred which prevented the send.
23521da177e4SLinus Torvalds */
235393b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
23541da177e4SLinus Torvalds {
23551da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
23565d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
23577d227cd2SSridhar Samudrala unsigned int cur_mss;
23581da177e4SLinus Torvalds
23595d424d5aSJohn Heffner /* Inconclusive MTU probe */
23605d424d5aSJohn Heffner if (icsk->icsk_mtup.probe_size) {
23615d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0;
23625d424d5aSJohn Heffner }
23635d424d5aSJohn Heffner
23641da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible
2365caa20d9aSStephen Hemminger * copying overhead: fragmentation, tunneling, mangling etc.
23661da177e4SLinus Torvalds */
23671da177e4SLinus Torvalds if (atomic_read(&sk->sk_wmem_alloc) >
23681da177e4SLinus Torvalds min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
23691da177e4SLinus Torvalds return -EAGAIN;
23701da177e4SLinus Torvalds
23711da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
23721da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
23731da177e4SLinus Torvalds BUG();
23741da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
23751da177e4SLinus Torvalds return -ENOMEM;
23761da177e4SLinus Torvalds }
23771da177e4SLinus Torvalds
23787d227cd2SSridhar Samudrala if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
23797d227cd2SSridhar Samudrala return -EHOSTUNREACH; /* Routing failure or similar. */
23807d227cd2SSridhar Samudrala
23810c54b85fSIlpo Järvinen cur_mss = tcp_current_mss(sk);
23827d227cd2SSridhar Samudrala
23831da177e4SLinus Torvalds /* If receiver has shrunk his window, and skb is out of
23841da177e4SLinus Torvalds * new window, do not retransmit it. The exception is the
23851da177e4SLinus Torvalds * case, when window is shrunk to zero. In this case
23861da177e4SLinus Torvalds * our retransmit serves as a zero window probe.
23871da177e4SLinus Torvalds */
23889d4fb27dSJoe Perches if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
23899d4fb27dSJoe Perches TCP_SKB_CB(skb)->seq != tp->snd_una)
23901da177e4SLinus Torvalds return -EAGAIN;
23911da177e4SLinus Torvalds
23921da177e4SLinus Torvalds if (skb->len > cur_mss) {
2393846998aeSDavid S. Miller if (tcp_fragment(sk, skb, cur_mss, cur_mss))
23941da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */
239502276f3cSIlpo Järvinen } else {
23969eb9362eSIlpo Järvinen int oldpcount = tcp_skb_pcount(skb);
23979eb9362eSIlpo Järvinen
23989eb9362eSIlpo Järvinen if (unlikely(oldpcount > 1)) {
2399c52e2421SEric Dumazet if (skb_unclone(skb, GFP_ATOMIC))
2400c52e2421SEric Dumazet return -ENOMEM;
240102276f3cSIlpo Järvinen tcp_init_tso_segs(sk, skb, cur_mss);
24029eb9362eSIlpo Järvinen tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
24039eb9362eSIlpo Järvinen }
24041da177e4SLinus Torvalds }
24051da177e4SLinus Torvalds
24061da177e4SLinus Torvalds tcp_retrans_try_collapse(sk, skb, cur_mss);
24071da177e4SLinus Torvalds
24081da177e4SLinus Torvalds /* Make a copy, if the first transmission SKB clone we made
24091da177e4SLinus Torvalds * is still in somebody's hands, else make a clone.
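 * (Editorial aside, not part of the kernel source: the first transmission's
 * skb may still be cloned in a driver transmit ring, so it cannot be edited
 * in place. The branch below additionally copies rather than clones when
 * skb->data is not word-aligned on strict-alignment arches, or when
 * trimming/collapsing has grown the headroom past 0xFFFF, beyond what the
 * 16-bit csum_start offset can describe.)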
24101da177e4SLinus Torvalds */
24111da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp;
24121da177e4SLinus Torvalds
241350bceae9SThomas Graf /* make sure skb->data is aligned on arches that require it
241450bceae9SThomas Graf * and check if ack-trimming & collapsing extended the headroom
241550bceae9SThomas Graf * beyond what csum_start can cover.
241650bceae9SThomas Graf */
241750bceae9SThomas Graf if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
241850bceae9SThomas Graf skb_headroom(skb) >= 0xFFFF)) {
2419117632e6SEric Dumazet struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2420117632e6SEric Dumazet GFP_ATOMIC);
242193b174adSYuchung Cheng return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2422117632e6SEric Dumazet -ENOBUFS;
2423117632e6SEric Dumazet } else {
242493b174adSYuchung Cheng return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2425117632e6SEric Dumazet }
242693b174adSYuchung Cheng }
242793b174adSYuchung Cheng
242893b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
242993b174adSYuchung Cheng {
243093b174adSYuchung Cheng struct tcp_sock *tp = tcp_sk(sk);
243193b174adSYuchung Cheng int err = __tcp_retransmit_skb(sk, skb);
24321da177e4SLinus Torvalds
24331da177e4SLinus Torvalds if (err == 0) {
24341da177e4SLinus Torvalds /* Update global TCP statistics. */
243581cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
24361da177e4SLinus Torvalds
24371da177e4SLinus Torvalds tp->total_retrans++;
24381da177e4SLinus Torvalds
24391da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
24401da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2441e87cc472SJoe Perches net_dbg_ratelimited("retrans_out leaked\n");
24421da177e4SLinus Torvalds }
24431da177e4SLinus Torvalds #endif
2444b08d6cb2SIlpo Järvinen if (!tp->retrans_out)
2445b08d6cb2SIlpo Järvinen tp->lost_retrans_low = tp->snd_nxt;
24461da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
24471da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb);
24481da177e4SLinus Torvalds
24491da177e4SLinus Torvalds /* Save stamp of the first retransmit. */
24501da177e4SLinus Torvalds if (!tp->retrans_stamp)
24511da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(skb)->when;
24521da177e4SLinus Torvalds
2453c24f691bSYuchung Cheng tp->undo_retrans += tcp_skb_pcount(skb);
24541da177e4SLinus Torvalds
24551da177e4SLinus Torvalds /* snd_nxt is stored to detect loss of retransmitted segment,
24561da177e4SLinus Torvalds * see tcp_input.c tcp_sacktag_write_queue().
24571da177e4SLinus Torvalds */
24581da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
245924ab6becSYuchung Cheng } else {
246024ab6becSYuchung Cheng NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
24611da177e4SLinus Torvalds }
24621da177e4SLinus Torvalds return err;
24631da177e4SLinus Torvalds }
24641da177e4SLinus Torvalds
246567edfef7SAndi Kleen /* Check whether forward retransmits are possible in the current
246667edfef7SAndi Kleen * window/congestion state.
246767edfef7SAndi Kleen */
2468a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk)
2469b5afe7bcSIlpo Järvinen {
2470b5afe7bcSIlpo Järvinen const struct inet_connection_sock *icsk = inet_csk(sk);
2471cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
2472b5afe7bcSIlpo Järvinen
2473b5afe7bcSIlpo Järvinen /* Forward retransmissions are possible only during Recovery.
*/
2474b5afe7bcSIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Recovery)
2475a2a385d6SEric Dumazet return false;
2476b5afe7bcSIlpo Järvinen
2477b5afe7bcSIlpo Järvinen /* No forward retransmissions in Reno are possible. */
2478b5afe7bcSIlpo Järvinen if (tcp_is_reno(tp))
2479a2a385d6SEric Dumazet return false;
2480b5afe7bcSIlpo Järvinen
2481b5afe7bcSIlpo Järvinen /* Yeah, we have to make a difficult choice between forward transmission
2482b5afe7bcSIlpo Järvinen * and retransmission... Both ways have their merits...
2483b5afe7bcSIlpo Järvinen *
2484b5afe7bcSIlpo Järvinen * For now we do not retransmit anything, while we have some new
2485b5afe7bcSIlpo Järvinen * segments to send. In the other cases, follow rule 3 for
2486b5afe7bcSIlpo Järvinen * NextSeg() specified in RFC3517.
2487b5afe7bcSIlpo Järvinen */
2488b5afe7bcSIlpo Järvinen
2489b5afe7bcSIlpo Järvinen if (tcp_may_send_now(sk))
2490a2a385d6SEric Dumazet return false;
2491b5afe7bcSIlpo Järvinen
2492a2a385d6SEric Dumazet return true;
2493b5afe7bcSIlpo Järvinen }
2494b5afe7bcSIlpo Järvinen
24951da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
24961da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue
24971da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either
24981da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached.
24991da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout
25001da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again.
25011da177e4SLinus Torvalds * If so, we use it to avoid unnecessary retransmissions.
25021da177e4SLinus Torvalds */
25031da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
25041da177e4SLinus Torvalds {
25056687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
25061da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
25071da177e4SLinus Torvalds struct sk_buff *skb;
25080e1c54c2SIlpo Järvinen struct sk_buff *hole = NULL;
2509618d9f25SIlpo Järvinen u32 last_lost;
251061eb55f4SIlpo Järvinen int mib_idx;
25110e1c54c2SIlpo Järvinen int fwd_rexmitting = 0;
25126a438bbeSStephen Hemminger
251345e77d31SIlpo Järvinen if (!tp->packets_out)
251445e77d31SIlpo Järvinen return;
251545e77d31SIlpo Järvinen
251608ebd172SIlpo Järvinen if (!tp->lost_out)
251708ebd172SIlpo Järvinen tp->retransmit_high = tp->snd_una;
251808ebd172SIlpo Järvinen
2519618d9f25SIlpo Järvinen if (tp->retransmit_skb_hint) {
25206a438bbeSStephen Hemminger skb = tp->retransmit_skb_hint;
2521618d9f25SIlpo Järvinen last_lost = TCP_SKB_CB(skb)->end_seq;
2522618d9f25SIlpo Järvinen if (after(last_lost, tp->retransmit_high))
2523618d9f25SIlpo Järvinen last_lost = tp->retransmit_high;
2524618d9f25SIlpo Järvinen } else {
2525fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk);
2526618d9f25SIlpo Järvinen last_lost = tp->snd_una;
2527618d9f25SIlpo Järvinen }
25281da177e4SLinus Torvalds
2529fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) {
25301da177e4SLinus Torvalds __u8 sacked = TCP_SKB_CB(skb)->sacked;
25311da177e4SLinus Torvalds
2532fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk))
2533fe067e8aSDavid S.
Miller break; 25346a438bbeSStephen Hemminger /* we could do better than to assign each time */ 25350e1c54c2SIlpo Järvinen if (hole == NULL) 25366a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb; 25376a438bbeSStephen Hemminger 25381da177e4SLinus Torvalds /* Assume this retransmit will generate 25391da177e4SLinus Torvalds * only one packet for congestion window 25401da177e4SLinus Torvalds * calculation purposes. This works because 25411da177e4SLinus Torvalds * tcp_retransmit_skb() will chop up the 25421da177e4SLinus Torvalds * packet to be MSS sized and all the 25431da177e4SLinus Torvalds * packet counting works out. 25441da177e4SLinus Torvalds */ 25451da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 25461da177e4SLinus Torvalds return; 25470e1c54c2SIlpo Järvinen 25480e1c54c2SIlpo Järvinen if (fwd_rexmitting) { 25490e1c54c2SIlpo Järvinen begin_fwd: 25500e1c54c2SIlpo Järvinen if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2551006f582cSIlpo Järvinen break; 25520e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 25530e1c54c2SIlpo Järvinen 25540e1c54c2SIlpo Järvinen } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2555618d9f25SIlpo Järvinen tp->retransmit_high = last_lost; 25560e1c54c2SIlpo Järvinen if (!tcp_can_forward_retransmit(sk)) 25570e1c54c2SIlpo Järvinen break; 25580e1c54c2SIlpo Järvinen /* Backtrack if necessary to non-L'ed skb */ 25590e1c54c2SIlpo Järvinen if (hole != NULL) { 25600e1c54c2SIlpo Järvinen skb = hole; 25610e1c54c2SIlpo Järvinen hole = NULL; 25620e1c54c2SIlpo Järvinen } 25630e1c54c2SIlpo Järvinen fwd_rexmitting = 1; 25640e1c54c2SIlpo Järvinen goto begin_fwd; 25650e1c54c2SIlpo Järvinen 25660e1c54c2SIlpo Järvinen } else if (!(sacked & TCPCB_LOST)) { 2567ac11ba75SIlpo Järvinen if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 25680e1c54c2SIlpo Järvinen hole = skb; 256961eb55f4SIlpo Järvinen continue; 25701da177e4SLinus Torvalds 25710e1c54c2SIlpo Järvinen } else { 2572618d9f25SIlpo Järvinen last_lost = TCP_SKB_CB(skb)->end_seq; 25730e1c54c2SIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Loss) 25740e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFASTRETRANS; 25750e1c54c2SIlpo Järvinen else 25760e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 25770e1c54c2SIlpo Järvinen } 25780e1c54c2SIlpo Järvinen 25790e1c54c2SIlpo Järvinen if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 258061eb55f4SIlpo Järvinen continue; 258140b215e5SPavel Emelyanov 258224ab6becSYuchung Cheng if (tcp_retransmit_skb(sk, skb)) 25831da177e4SLinus Torvalds return; 258424ab6becSYuchung Cheng 2585de0744afSPavel Emelyanov NET_INC_STATS_BH(sock_net(sk), mib_idx); 25861da177e4SLinus Torvalds 2587684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 2588a262f0cdSNandita Dukkipati tp->prr_out += tcp_skb_pcount(skb); 2589a262f0cdSNandita Dukkipati 2590fe067e8aSDavid S. Miller if (skb == tcp_write_queue_head(sk)) 2591463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 25923f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 25933f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 25941da177e4SLinus Torvalds } 25951da177e4SLinus Torvalds } 25961da177e4SLinus Torvalds 25971da177e4SLinus Torvalds /* Send a fin. The caller locks the socket for us. This cannot be 25981da177e4SLinus Torvalds * allowed to fail queueing a FIN frame under any circumstances. 
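 * (Editorial aside, not part of the kernel source: the FIN consumes one
 * sequence number but carries no data. If write_seq is 1000 when the FIN
 * is attached, the segment gets seq = 1000 and end_seq = 1001, so the
 * peer's ACK of 1001 covers the FIN itself; this is why end_seq and
 * write_seq are each bumped by one below when the FIN piggybacks on the
 * tail skb.)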
25991da177e4SLinus Torvalds */ 26001da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 26011da177e4SLinus Torvalds { 26021da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2603fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_write_queue_tail(sk); 26041da177e4SLinus Torvalds int mss_now; 26051da177e4SLinus Torvalds 26061da177e4SLinus Torvalds /* Optimization, tack on the FIN if we have a queue of 26071da177e4SLinus Torvalds * unsent frames. But be careful about outgoing SACKS 26081da177e4SLinus Torvalds * and IP options. 26091da177e4SLinus Torvalds */ 26100c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk); 26111da177e4SLinus Torvalds 2612fe067e8aSDavid S. Miller if (tcp_send_head(sk) != NULL) { 26134de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 26141da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq++; 26151da177e4SLinus Torvalds tp->write_seq++; 26161da177e4SLinus Torvalds } else { 26171da177e4SLinus Torvalds /* Socket is locked, keep trying until memory is available. */ 26181da177e4SLinus Torvalds for (;;) { 2619aa133076SWu Fengguang skb = alloc_skb_fclone(MAX_TCP_HEADER, 2620aa133076SWu Fengguang sk->sk_allocation); 26211da177e4SLinus Torvalds if (skb) 26221da177e4SLinus Torvalds break; 26231da177e4SLinus Torvalds yield(); 26241da177e4SLinus Torvalds } 26251da177e4SLinus Torvalds 26261da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 26271da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 26281da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2629e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tp->write_seq, 2630a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_FIN); 26311da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 26321da177e4SLinus Torvalds } 26339e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 26341da177e4SLinus Torvalds } 26351da177e4SLinus Torvalds 26361da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 26371da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 26381da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 263965bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 26401da177e4SLinus Torvalds */ 2641dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 26421da177e4SLinus Torvalds { 26431da177e4SLinus Torvalds struct sk_buff *skb; 26441da177e4SLinus Torvalds 26451da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 26461da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 26471da177e4SLinus Torvalds if (!skb) { 26484e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 26491da177e4SLinus Torvalds return; 26501da177e4SLinus Torvalds } 26511da177e4SLinus Torvalds 26521da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 26531da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 2654e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2655a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_RST); 26561da177e4SLinus Torvalds /* Send it off. */ 26571da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2658dfb4b9dcSDavid S. 
Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 26594e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 266026af65cbSSridhar Samudrala 266181cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 26621da177e4SLinus Torvalds } 26631da177e4SLinus Torvalds 266467edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment. 266567edfef7SAndi Kleen * WARNING: This routine must only be called when we have already sent 26661da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 26671da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 26681da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 26691da177e4SLinus Torvalds */ 26701da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 26711da177e4SLinus Torvalds { 26721da177e4SLinus Torvalds struct sk_buff *skb; 26731da177e4SLinus Torvalds 2674fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 26754de075e0SEric Dumazet if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 267691df42beSJoe Perches pr_debug("%s: wrong queue state\n", __func__); 26771da177e4SLinus Torvalds return -EFAULT; 26781da177e4SLinus Torvalds } 26794de075e0SEric Dumazet if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 26801da177e4SLinus Torvalds if (skb_cloned(skb)) { 26811da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 26821da177e4SLinus Torvalds if (nskb == NULL) 26831da177e4SLinus Torvalds return -ENOMEM; 2684fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 26851da177e4SLinus Torvalds skb_header_release(nskb); 2686fe067e8aSDavid S. Miller __tcp_add_write_queue_head(sk, nskb); 26873ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 26883ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 26893ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 26901da177e4SLinus Torvalds skb = nskb; 26911da177e4SLinus Torvalds } 26921da177e4SLinus Torvalds 26934de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 26941da177e4SLinus Torvalds TCP_ECN_send_synack(tcp_sk(sk), skb); 26951da177e4SLinus Torvalds } 26961da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2697dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 26981da177e4SLinus Torvalds } 26991da177e4SLinus Torvalds 27004aea39c1SEric Dumazet /** 27014aea39c1SEric Dumazet * tcp_make_synack - Prepare a SYN-ACK. 27024aea39c1SEric Dumazet * sk: listener socket 27034aea39c1SEric Dumazet * dst: dst entry attached to the SYNACK 27044aea39c1SEric Dumazet * req: request_sock pointer 27054aea39c1SEric Dumazet * 27064aea39c1SEric Dumazet * Allocate one skb and build a SYNACK packet. 27074aea39c1SEric Dumazet * @dst is consumed : Caller should not use it again. 
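 *
 * (Editorial note, not part of the kernel source: "consumed" holds on every
 * path. As the body below shows, the dst reference is released explicitly
 * when skb allocation fails and is otherwise handed to the skb via
 * skb_dst_set(), so the caller must not touch @dst afterwards.)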
27084aea39c1SEric Dumazet */
27091da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2710e6b4d113SWilliam Allen Simpson struct request_sock *req,
27118336886fSJerry Chu struct tcp_fastopen_cookie *foc)
27121da177e4SLinus Torvalds {
2713bd0388aeSWilliam Allen Simpson struct tcp_out_options opts;
27142e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req);
27151da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
27161da177e4SLinus Torvalds struct tcphdr *th;
27171da177e4SLinus Torvalds struct sk_buff *skb;
2718cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key *md5;
2719bd0388aeSWilliam Allen Simpson int tcp_header_size;
2720f5fff5dcSTom Quetchenbach int mss;
27211da177e4SLinus Torvalds
2722eb8895deSPhil Oester skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
27234aea39c1SEric Dumazet if (unlikely(!skb)) {
27244aea39c1SEric Dumazet dst_release(dst);
27251da177e4SLinus Torvalds return NULL;
27264aea39c1SEric Dumazet }
27271da177e4SLinus Torvalds /* Reserve space for headers. */
27281da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER);
27291da177e4SLinus Torvalds
27304aea39c1SEric Dumazet skb_dst_set(skb, dst);
2731ca10b9e9SEric Dumazet security_skb_owned_by(skb, sk);
27321da177e4SLinus Torvalds
27330dbaee3bSDavid S. Miller mss = dst_metric_advmss(dst);
2734f5fff5dcSTom Quetchenbach if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2735f5fff5dcSTom Quetchenbach mss = tp->rx_opt.user_mss;
2736f5fff5dcSTom Quetchenbach
273733ad798cSAdam Langley if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
273833ad798cSAdam Langley __u8 rcv_wscale;
273933ad798cSAdam Langley /* Set this up on the first call only */
274033ad798cSAdam Langley req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2741e88c64f0SHagen Paul Pfeifer
2742e88c64f0SHagen Paul Pfeifer /* limit the window selection if the user enforces a smaller rx buffer */
2743e88c64f0SHagen Paul Pfeifer if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2744e88c64f0SHagen Paul Pfeifer (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2745e88c64f0SHagen Paul Pfeifer req->window_clamp = tcp_full_space(sk);
2746e88c64f0SHagen Paul Pfeifer
274733ad798cSAdam Langley /* tcp_full_space because it is guaranteed to be the first packet */
274833ad798cSAdam Langley tcp_select_initial_window(tcp_full_space(sk),
2749f5fff5dcSTom Quetchenbach mss - (ireq->tstamp_ok ?
TCPOLEN_TSTAMP_ALIGNED : 0), 275033ad798cSAdam Langley &req->rcv_wnd, 275133ad798cSAdam Langley &req->window_clamp, 275233ad798cSAdam Langley ireq->wscale_ok, 275331d12926Slaurent chavey &rcv_wscale, 275431d12926Slaurent chavey dst_metric(dst, RTAX_INITRWND)); 275533ad798cSAdam Langley ireq->rcv_wscale = rcv_wscale; 275633ad798cSAdam Langley } 2757cfb6eeb4SYOSHIFUJI Hideaki 275833ad798cSAdam Langley memset(&opts, 0, sizeof(opts)); 27598b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES 27608b5f12d0SFlorian Westphal if (unlikely(req->cookie_ts)) 27618b5f12d0SFlorian Westphal TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 27628b5f12d0SFlorian Westphal else 27638b5f12d0SFlorian Westphal #endif 276433ad798cSAdam Langley TCP_SKB_CB(skb)->when = tcp_time_stamp; 27651a2c6181SChristoph Paasch tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, 27661a2c6181SChristoph Paasch foc) + sizeof(*th); 276733ad798cSAdam Langley 2768aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size); 2769aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb); 27701da177e4SLinus Torvalds 2771aa8223c7SArnaldo Carvalho de Melo th = tcp_hdr(skb); 27721da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 27731da177e4SLinus Torvalds th->syn = 1; 27741da177e4SLinus Torvalds th->ack = 1; 27751da177e4SLinus Torvalds TCP_ECN_make_synack(req, th); 2776b44084c2SEric Dumazet th->source = htons(ireq->ir_num); 2777634fb979SEric Dumazet th->dest = ireq->ir_rmt_port; 2778e870a8efSIlpo Järvinen /* Setting of flags are superfluous here for callers (and ECE is 2779e870a8efSIlpo Järvinen * not even correctly set) 2780e870a8efSIlpo Järvinen */ 2781e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2782a3433f35SChangli Gao TCPHDR_SYN | TCPHDR_ACK); 27834957faadSWilliam Allen Simpson 27841da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq); 27858336886fSJerry Chu /* XXX data is queued and acked as is. No buffer/window check */ 27868336886fSJerry Chu th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 27871da177e4SLinus Torvalds 27881da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2789600ff0c2SIlpo Järvinen th->window = htons(min(req->rcv_wnd, 65535U)); 2790bd0388aeSWilliam Allen Simpson tcp_options_write((__be32 *)(th + 1), tp, &opts); 27911da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 2792aa2ea058STom Herbert TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); 2793cfb6eeb4SYOSHIFUJI Hideaki 2794cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2795cfb6eeb4SYOSHIFUJI Hideaki /* Okay, we have all we need - do the md5 hash if needed */ 2796cfb6eeb4SYOSHIFUJI Hideaki if (md5) { 2797bd0388aeSWilliam Allen Simpson tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 279849a72dfbSAdam Langley md5, NULL, req, skb); 2799cfb6eeb4SYOSHIFUJI Hideaki } 2800cfb6eeb4SYOSHIFUJI Hideaki #endif 2801cfb6eeb4SYOSHIFUJI Hideaki 28021da177e4SLinus Torvalds return skb; 28031da177e4SLinus Torvalds } 28044bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack); 28051da177e4SLinus Torvalds 280667edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. 
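 * (Editorial aside, not part of the kernel source: a worked example of the
 * header sizing done below. A SYN with timestamps enabled reserves
 * tcp_header_len = 20 + TCPOLEN_TSTAMP_ALIGNED = 20 + 12 = 32 bytes, and an
 * MD5-signed socket adds another TCPOLEN_MD5SIG_ALIGNED = 20 bytes; the
 * timestamp overhead is likewise subtracted from advmss when the initial
 * window is selected.)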
*/
2807f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
28081da177e4SLinus Torvalds {
2809cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk);
28101da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
28111da177e4SLinus Torvalds __u8 rcv_wscale;
28121da177e4SLinus Torvalds
28131da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end.
28141da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
28151da177e4SLinus Torvalds */
28161da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr) +
2817bb5b7c11SDavid S. Miller (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
28181da177e4SLinus Torvalds
2819cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2820cfb6eeb4SYOSHIFUJI Hideaki if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2821cfb6eeb4SYOSHIFUJI Hideaki tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2822cfb6eeb4SYOSHIFUJI Hideaki #endif
2823cfb6eeb4SYOSHIFUJI Hideaki
28241da177e4SLinus Torvalds /* If the user gave us a TCP_MAXSEG, record it to clamp */
28251da177e4SLinus Torvalds if (tp->rx_opt.user_mss)
28261da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
28271da177e4SLinus Torvalds tp->max_window = 0;
28285d424d5aSJohn Heffner tcp_mtup_init(sk);
28291da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst));
28301da177e4SLinus Torvalds
28311da177e4SLinus Torvalds if (!tp->window_clamp)
28321da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
28330dbaee3bSDavid S. Miller tp->advmss = dst_metric_advmss(dst);
2834f5fff5dcSTom Quetchenbach if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2835f5fff5dcSTom Quetchenbach tp->advmss = tp->rx_opt.user_mss;
2836f5fff5dcSTom Quetchenbach
28371da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk);
28381da177e4SLinus Torvalds
2839e88c64f0SHagen Paul Pfeifer /* limit the window selection if the user enforces a smaller rx buffer */
2840e88c64f0SHagen Paul Pfeifer if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2841e88c64f0SHagen Paul Pfeifer (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2842e88c64f0SHagen Paul Pfeifer tp->window_clamp = tcp_full_space(sk);
2843e88c64f0SHagen Paul Pfeifer
28441da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk),
28451da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
28461da177e4SLinus Torvalds &tp->rcv_wnd,
28471da177e4SLinus Torvalds &tp->window_clamp,
2848bb5b7c11SDavid S.
Miller sysctl_tcp_window_scaling, 284931d12926Slaurent chavey &rcv_wscale, 285031d12926Slaurent chavey dst_metric(dst, RTAX_INITRWND)); 28511da177e4SLinus Torvalds 28521da177e4SLinus Torvalds tp->rx_opt.rcv_wscale = rcv_wscale; 28531da177e4SLinus Torvalds tp->rcv_ssthresh = tp->rcv_wnd; 28541da177e4SLinus Torvalds 28551da177e4SLinus Torvalds sk->sk_err = 0; 28561da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 28571da177e4SLinus Torvalds tp->snd_wnd = 0; 2858ee7537b6SHantzis Fotis tcp_init_wl(tp, 0); 28591da177e4SLinus Torvalds tp->snd_una = tp->write_seq; 28601da177e4SLinus Torvalds tp->snd_sml = tp->write_seq; 286133f5f57eSIlpo Järvinen tp->snd_up = tp->write_seq; 2862370816aeSPavel Emelyanov tp->snd_nxt = tp->write_seq; 2863ee995283SPavel Emelyanov 2864ee995283SPavel Emelyanov if (likely(!tp->repair)) 28651da177e4SLinus Torvalds tp->rcv_nxt = 0; 2866c7781a6eSAndrew Vagin else 2867c7781a6eSAndrew Vagin tp->rcv_tstamp = tcp_time_stamp; 2868ee995283SPavel Emelyanov tp->rcv_wup = tp->rcv_nxt; 2869ee995283SPavel Emelyanov tp->copied_seq = tp->rcv_nxt; 28701da177e4SLinus Torvalds 2871463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2872463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 28731da177e4SLinus Torvalds tcp_clear_retrans(tp); 28741da177e4SLinus Torvalds } 28751da177e4SLinus Torvalds 2876783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 2877783237e8SYuchung Cheng { 2878783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 2879783237e8SYuchung Cheng struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 2880783237e8SYuchung Cheng 2881783237e8SYuchung Cheng tcb->end_seq += skb->len; 2882783237e8SYuchung Cheng skb_header_release(skb); 2883783237e8SYuchung Cheng __tcp_add_write_queue_tail(sk, skb); 2884783237e8SYuchung Cheng sk->sk_wmem_queued += skb->truesize; 2885783237e8SYuchung Cheng sk_mem_charge(sk, skb->truesize); 2886783237e8SYuchung Cheng tp->write_seq = tcb->end_seq; 2887783237e8SYuchung Cheng tp->packets_out += tcp_skb_pcount(skb); 2888783237e8SYuchung Cheng } 2889783237e8SYuchung Cheng 2890783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However, 2891783237e8SYuchung Cheng * queue a data-only packet after the regular SYN, such that regular SYNs 2892783237e8SYuchung Cheng * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges 2893783237e8SYuchung Cheng * only the SYN sequence, the data are retransmitted in the first ACK. 2894783237e8SYuchung Cheng * If cookie is not cached or other error occurs, falls back to send a 2895783237e8SYuchung Cheng * regular SYN with Fast Open cookie request option. 
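 *
 * (Editorial aside, not part of the kernel source: a worked example of the
 * space computation below. With a path MTU giving an MSS of 1460, the SYN
 * can carry at most 1460 - MAX_TCP_OPTION_SPACE = 1460 - 40 = 1420 bytes of
 * data, since the full 40 bytes of option space are reserved for
 * middleboxes that may append private options.)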
2896783237e8SYuchung Cheng */ 2897783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 2898783237e8SYuchung Cheng { 2899783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 2900783237e8SYuchung Cheng struct tcp_fastopen_request *fo = tp->fastopen_req; 2901aab48743SYuchung Cheng int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; 2902783237e8SYuchung Cheng struct sk_buff *syn_data = NULL, *data; 2903aab48743SYuchung Cheng unsigned long last_syn_loss = 0; 2904783237e8SYuchung Cheng 290567da22d2SYuchung Cheng tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 2906aab48743SYuchung Cheng tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, 2907aab48743SYuchung Cheng &syn_loss, &last_syn_loss); 2908aab48743SYuchung Cheng /* Recurring FO SYN losses: revert to regular handshake temporarily */ 2909aab48743SYuchung Cheng if (syn_loss > 1 && 2910aab48743SYuchung Cheng time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { 2911aab48743SYuchung Cheng fo->cookie.len = -1; 2912aab48743SYuchung Cheng goto fallback; 2913aab48743SYuchung Cheng } 2914aab48743SYuchung Cheng 291567da22d2SYuchung Cheng if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) 291667da22d2SYuchung Cheng fo->cookie.len = -1; 291767da22d2SYuchung Cheng else if (fo->cookie.len <= 0) 2918783237e8SYuchung Cheng goto fallback; 2919783237e8SYuchung Cheng 2920783237e8SYuchung Cheng /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 2921783237e8SYuchung Cheng * user-MSS. Reserve maximum option space for middleboxes that add 2922783237e8SYuchung Cheng * private TCP options. The cost is reduced data space in SYN :( 2923783237e8SYuchung Cheng */ 2924783237e8SYuchung Cheng if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) 2925783237e8SYuchung Cheng tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 29261b63edd6SYuchung Cheng space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - 2927783237e8SYuchung Cheng MAX_TCP_OPTION_SPACE; 2928783237e8SYuchung Cheng 2929783237e8SYuchung Cheng syn_data = skb_copy_expand(syn, skb_headroom(syn), space, 2930783237e8SYuchung Cheng sk->sk_allocation); 2931783237e8SYuchung Cheng if (syn_data == NULL) 2932783237e8SYuchung Cheng goto fallback; 2933783237e8SYuchung Cheng 2934783237e8SYuchung Cheng for (i = 0; i < iovlen && syn_data->len < space; ++i) { 2935783237e8SYuchung Cheng struct iovec *iov = &fo->data->msg_iov[i]; 2936783237e8SYuchung Cheng unsigned char __user *from = iov->iov_base; 2937783237e8SYuchung Cheng int len = iov->iov_len; 2938783237e8SYuchung Cheng 2939783237e8SYuchung Cheng if (syn_data->len + len > space) 2940783237e8SYuchung Cheng len = space - syn_data->len; 2941783237e8SYuchung Cheng else if (i + 1 == iovlen) 2942783237e8SYuchung Cheng /* No more data pending in inet_wait_for_connect() */ 2943783237e8SYuchung Cheng fo->data = NULL; 2944783237e8SYuchung Cheng 2945783237e8SYuchung Cheng if (skb_add_data(syn_data, from, len)) 2946783237e8SYuchung Cheng goto fallback; 2947783237e8SYuchung Cheng } 2948783237e8SYuchung Cheng 2949783237e8SYuchung Cheng /* Queue a data-only packet after the regular SYN for retransmission */ 2950783237e8SYuchung Cheng data = pskb_copy(syn_data, sk->sk_allocation); 2951783237e8SYuchung Cheng if (data == NULL) 2952783237e8SYuchung Cheng goto fallback; 2953783237e8SYuchung Cheng TCP_SKB_CB(data)->seq++; 2954783237e8SYuchung Cheng TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; 2955783237e8SYuchung Cheng TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); 
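/* Editorial note, not part of the kernel source: the duplicate built above
 * is the data-only retransmission copy. Its sequence number is bumped by
 * one so it starts just past the SYN, and its flags are rewritten to
 * ACK|PSH so that retransmitting the data never resends the SYN bit.
 */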
2956783237e8SYuchung Cheng tcp_connect_queue_skb(sk, data); 2957783237e8SYuchung Cheng fo->copied = data->len; 2958783237e8SYuchung Cheng 2959783237e8SYuchung Cheng if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { 296067da22d2SYuchung Cheng tp->syn_data = (fo->copied > 0); 2961783237e8SYuchung Cheng NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); 2962783237e8SYuchung Cheng goto done; 2963783237e8SYuchung Cheng } 2964783237e8SYuchung Cheng syn_data = NULL; 2965783237e8SYuchung Cheng 2966783237e8SYuchung Cheng fallback: 2967783237e8SYuchung Cheng /* Send a regular SYN with Fast Open cookie request option */ 2968783237e8SYuchung Cheng if (fo->cookie.len > 0) 2969783237e8SYuchung Cheng fo->cookie.len = 0; 2970783237e8SYuchung Cheng err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 2971783237e8SYuchung Cheng if (err) 2972783237e8SYuchung Cheng tp->syn_fastopen = 0; 2973783237e8SYuchung Cheng kfree_skb(syn_data); 2974783237e8SYuchung Cheng done: 2975783237e8SYuchung Cheng fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 2976783237e8SYuchung Cheng return err; 2977783237e8SYuchung Cheng } 2978783237e8SYuchung Cheng 297967edfef7SAndi Kleen /* Build a SYN and send it off. */ 29801da177e4SLinus Torvalds int tcp_connect(struct sock *sk) 29811da177e4SLinus Torvalds { 29821da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29831da177e4SLinus Torvalds struct sk_buff *buff; 2984ee586811SEric Paris int err; 29851da177e4SLinus Torvalds 29861da177e4SLinus Torvalds tcp_connect_init(sk); 29871da177e4SLinus Torvalds 29882b916477SAndrey Vagin if (unlikely(tp->repair)) { 29892b916477SAndrey Vagin tcp_finish_connect(sk, NULL); 29902b916477SAndrey Vagin return 0; 29912b916477SAndrey Vagin } 29922b916477SAndrey Vagin 2993d179cd12SDavid S. Miller buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 29941da177e4SLinus Torvalds if (unlikely(buff == NULL)) 29951da177e4SLinus Torvalds return -ENOBUFS; 29961da177e4SLinus Torvalds 29971da177e4SLinus Torvalds /* Reserve space for headers. */ 29981da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 29991da177e4SLinus Torvalds 3000a3433f35SChangli Gao tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 3001783237e8SYuchung Cheng tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; 3002783237e8SYuchung Cheng tcp_connect_queue_skb(sk, buff); 3003e870a8efSIlpo Järvinen TCP_ECN_send_syn(sk, buff); 30041da177e4SLinus Torvalds 3005783237e8SYuchung Cheng /* Send off SYN; include data in Fast Open. */ 3006783237e8SYuchung Cheng err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 3007783237e8SYuchung Cheng tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 3008ee586811SEric Paris if (err == -ECONNREFUSED) 3009ee586811SEric Paris return err; 3010bd37a088SWei Yongjun 3011bd37a088SWei Yongjun /* We change tp->snd_nxt after the tcp_transmit_skb() call 3012bd37a088SWei Yongjun * in order to make this packet get counted in tcpOutSegs. 3013bd37a088SWei Yongjun */ 3014bd37a088SWei Yongjun tp->snd_nxt = tp->write_seq; 3015bd37a088SWei Yongjun tp->pushed_seq = tp->write_seq; 301681cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 30171da177e4SLinus Torvalds 30181da177e4SLinus Torvalds /* Timer for repeating the SYN until an answer. 
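 * (Editorial aside, not part of the kernel source: the initial RTO armed
 * here is TCP_TIMEOUT_INIT, 1 second per RFC 6298 in kernels of this
 * vintage, and the retransmit timer doubles it for each unanswered SYN:
 * 1 s, 2 s, 4 s, and so on, capped at TCP_RTO_MAX.)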
*/
30193f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30203f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
30211da177e4SLinus Torvalds return 0;
30221da177e4SLinus Torvalds }
30234bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
30241da177e4SLinus Torvalds
30251da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
30261da177e4SLinus Torvalds * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
30271da177e4SLinus Torvalds * for details.
30281da177e4SLinus Torvalds */
30291da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
30301da177e4SLinus Torvalds {
3031463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
3032463c84b9SArnaldo Carvalho de Melo int ato = icsk->icsk_ack.ato;
30331da177e4SLinus Torvalds unsigned long timeout;
30341da177e4SLinus Torvalds
30351da177e4SLinus Torvalds if (ato > TCP_DELACK_MIN) {
3036463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk);
30371da177e4SLinus Torvalds int max_ato = HZ / 2;
30381da177e4SLinus Torvalds
3039056834d9SIlpo Järvinen if (icsk->icsk_ack.pingpong ||
3040056834d9SIlpo Järvinen (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
30411da177e4SLinus Torvalds max_ato = TCP_DELACK_MAX;
30421da177e4SLinus Torvalds
30431da177e4SLinus Torvalds /* Slow path, intersegment interval is "high". */
30441da177e4SLinus Torvalds
30451da177e4SLinus Torvalds /* If some rtt estimate is known, use it to bound delayed ack.
3046463c84b9SArnaldo Carvalho de Melo * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
30471da177e4SLinus Torvalds * directly.
30481da177e4SLinus Torvalds */
3049*740b0f18SEric Dumazet if (tp->srtt_us) {
3050*740b0f18SEric Dumazet int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3051*740b0f18SEric Dumazet TCP_DELACK_MIN);
30521da177e4SLinus Torvalds
30531da177e4SLinus Torvalds if (rtt < max_ato)
30541da177e4SLinus Torvalds max_ato = rtt;
30551da177e4SLinus Torvalds }
30561da177e4SLinus Torvalds
30571da177e4SLinus Torvalds ato = min(ato, max_ato);
30581da177e4SLinus Torvalds }
30591da177e4SLinus Torvalds
30601da177e4SLinus Torvalds /* Stay within the limit we were given */
30611da177e4SLinus Torvalds timeout = jiffies + ato;
30621da177e4SLinus Torvalds
30631da177e4SLinus Torvalds /* Use new timeout only if there wasn't an older one earlier. */
3064463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
30651da177e4SLinus Torvalds /* If delack timer was blocked or is about to expire,
30661da177e4SLinus Torvalds * send ACK now.
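 * (Editorial aside, not part of the kernel source: a worked example of the
 * clamping above. TCP_DELACK_MIN is HZ/25 (40 ms) and TCP_DELACK_MAX is
 * HZ/5 (200 ms); with a smoothed RTT of 8 ms the bound is max(8, 40) =
 * 40 ms, so an ato of, say, 100 ms is clamped to 40 ms before the timeout
 * is armed.)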
30671da177e4SLinus Torvalds */
3068463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.blocked ||
3069463c84b9SArnaldo Carvalho de Melo time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
30701da177e4SLinus Torvalds tcp_send_ack(sk);
30711da177e4SLinus Torvalds return;
30721da177e4SLinus Torvalds }
30731da177e4SLinus Torvalds
3074463c84b9SArnaldo Carvalho de Melo if (!time_before(timeout, icsk->icsk_ack.timeout))
3075463c84b9SArnaldo Carvalho de Melo timeout = icsk->icsk_ack.timeout;
30761da177e4SLinus Torvalds }
3077463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3078463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.timeout = timeout;
3079463c84b9SArnaldo Carvalho de Melo sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
30801da177e4SLinus Torvalds }
30811da177e4SLinus Torvalds
30821da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
30831da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk)
30841da177e4SLinus Torvalds {
30851da177e4SLinus Torvalds struct sk_buff *buff;
30861da177e4SLinus Torvalds
3087058dc334SIlpo Järvinen /* If we have been reset, we may not send again. */
3088058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE)
3089058dc334SIlpo Järvinen return;
3090058dc334SIlpo Järvinen
30911da177e4SLinus Torvalds /* We are not putting this on the write queue, so
30921da177e4SLinus Torvalds * tcp_transmit_skb() will set the ownership to this
30931da177e4SLinus Torvalds * sock.
30941da177e4SLinus Torvalds */
309599a1dec7SMel Gorman buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
30961da177e4SLinus Torvalds if (buff == NULL) {
3097463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk);
3098463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
30993f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
31003f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX);
31011da177e4SLinus Torvalds return;
31021da177e4SLinus Torvalds }
31031da177e4SLinus Torvalds
31041da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */
31051da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER);
3106a3433f35SChangli Gao tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
31071da177e4SLinus Torvalds
31081da177e4SLinus Torvalds /* Send it off, this clears delayed acks for us. */
31091da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp;
311099a1dec7SMel Gorman tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
31111da177e4SLinus Torvalds }
31121da177e4SLinus Torvalds
31131da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
31141da177e4SLinus Torvalds * number. It assumes the other end will try to ack it.
31151da177e4SLinus Torvalds *
31161da177e4SLinus Torvalds * Question: what should we do in urgent mode?
31171da177e4SLinus Torvalds * 4.4BSD forces sending a single byte of data. We cannot send
31181da177e4SLinus Torvalds * out of window data, because we have SND.NXT==SND.MAX...
31191da177e4SLinus Torvalds *
31201da177e4SLinus Torvalds * Current solution: to send TWO zero-length segments in urgent mode:
31211da177e4SLinus Torvalds * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
31221da177e4SLinus Torvalds * out-of-date with SND.UNA-1 to probe window.
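 *
 * (Editorial aside, not part of the kernel source: a worked example. With
 * snd_una = 5000, the probe below carries seq 4999, one byte inside the
 * already-acknowledged range; the receiver discards it as a duplicate but
 * must reply with an ACK, and that ACK advertises its current window.)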
31231da177e4SLinus Torvalds */
31241da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
31251da177e4SLinus Torvalds {
31261da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
31271da177e4SLinus Torvalds struct sk_buff *skb;
31281da177e4SLinus Torvalds
31291da177e4SLinus Torvalds /* We don't queue it, tcp_transmit_skb() sets ownership. */
313099a1dec7SMel Gorman skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
31311da177e4SLinus Torvalds if (skb == NULL)
31321da177e4SLinus Torvalds return -1;
31331da177e4SLinus Torvalds
31341da177e4SLinus Torvalds /* Reserve space for headers and set control bits. */
31351da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER);
31361da177e4SLinus Torvalds /* Use a previous sequence. This should cause the other
31371da177e4SLinus Torvalds * end to send an ack. Don't queue or clone SKB, just
31381da177e4SLinus Torvalds * send it.
31391da177e4SLinus Torvalds */
3140a3433f35SChangli Gao tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
31411da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp;
3142dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
31431da177e4SLinus Torvalds }
31441da177e4SLinus Torvalds
3145ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3146ee995283SPavel Emelyanov {
3147ee995283SPavel Emelyanov if (sk->sk_state == TCP_ESTABLISHED) {
3148ee995283SPavel Emelyanov tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3149ee995283SPavel Emelyanov tcp_xmit_probe_skb(sk, 0);
3150ee995283SPavel Emelyanov }
3151ee995283SPavel Emelyanov }
3152ee995283SPavel Emelyanov
315367edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */
31541da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk)
31551da177e4SLinus Torvalds {
31561da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
31571da177e4SLinus Torvalds struct sk_buff *skb;
31581da177e4SLinus Torvalds
3159058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE)
3160058dc334SIlpo Järvinen return -1;
3161058dc334SIlpo Järvinen
3162fe067e8aSDavid S. Miller if ((skb = tcp_send_head(sk)) != NULL &&
316390840defSIlpo Järvinen before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
31641da177e4SLinus Torvalds int err;
31650c54b85fSIlpo Järvinen unsigned int mss = tcp_current_mss(sk);
316690840defSIlpo Järvinen unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
31671da177e4SLinus Torvalds
31681da177e4SLinus Torvalds if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
31691da177e4SLinus Torvalds tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
31701da177e4SLinus Torvalds
31711da177e4SLinus Torvalds /* We are probing the opening of a window
31721da177e4SLinus Torvalds * but the window size is != 0
31731da177e4SLinus Torvalds * must have been a result of SWS avoidance (sender)
31741da177e4SLinus Torvalds */
31751da177e4SLinus Torvalds if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
31761da177e4SLinus Torvalds skb->len > mss) {
31771da177e4SLinus Torvalds seg_size = min(seg_size, mss);
31784de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3179846998aeSDavid S. Miller if (tcp_fragment(sk, skb, seg_size, mss))
31801da177e4SLinus Torvalds return -1;
31811da177e4SLinus Torvalds } else if (!tcp_skb_pcount(skb))
3182846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss);
31831da177e4SLinus Torvalds
31844de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
31851da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp;
3186dfb4b9dcSDavid S.
Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
318766f5fe62SIlpo Järvinen if (!err)
318866f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb);
31891da177e4SLinus Torvalds return err;
31901da177e4SLinus Torvalds } else {
319133f5f57eSIlpo Järvinen if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
31924828e7f4SIlpo Järvinen tcp_xmit_probe_skb(sk, 1);
31931da177e4SLinus Torvalds return tcp_xmit_probe_skb(sk, 0);
31941da177e4SLinus Torvalds }
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds
31971da177e4SLinus Torvalds /* A window probe timeout has occurred. If the window is not closed, send
31981da177e4SLinus Torvalds * a partial packet, else a zero probe.
31991da177e4SLinus Torvalds */
32001da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
32011da177e4SLinus Torvalds {
3202463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
32031da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
32041da177e4SLinus Torvalds int err;
32051da177e4SLinus Torvalds
32061da177e4SLinus Torvalds err = tcp_write_wakeup(sk);
32071da177e4SLinus Torvalds
3208fe067e8aSDavid S. Miller if (tp->packets_out || !tcp_send_head(sk)) {
32091da177e4SLinus Torvalds /* Cancel probe timer, if it is not required. */
32106687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0;
3211463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0;
32121da177e4SLinus Torvalds return;
32131da177e4SLinus Torvalds }
32141da177e4SLinus Torvalds
32151da177e4SLinus Torvalds if (err <= 0) {
3216463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_backoff < sysctl_tcp_retries2)
3217463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff++;
32186687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out++;
3219463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
32203f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
32213f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX);
32221da177e4SLinus Torvalds } else {
32231da177e4SLinus Torvalds /* If packet was not sent due to local congestion,
32246687e988SArnaldo Carvalho de Melo * do not back off and do not remember icsk_probes_out.
32251da177e4SLinus Torvalds * Let local senders fight for local resources.
32261da177e4SLinus Torvalds *
32271da177e4SLinus Torvalds * Use the accumulated backoff, though.
32281da177e4SLinus Torvalds */
32296687e988SArnaldo Carvalho de Melo if (!icsk->icsk_probes_out)
32306687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 1;
3231463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3232463c84b9SArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff,
32333f421baaSArnaldo Carvalho de Melo TCP_RESOURCE_PROBE_INTERVAL),
32343f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX);
32351da177e4SLinus Torvalds }
32361da177e4SLinus Torvalds }
3237
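/* Editorial sketch, not part of the kernel source: the zero-window probe
 * backoff used by tcp_send_probe0() above, reduced to a self-contained
 * userspace helper. The constant mirrors TCP_RTO_MAX (120 seconds); the
 * function name and the millisecond units are hypothetical.
 */
#include <stdint.h>

#define SKETCH_RTO_MAX_MS 120000u

static uint32_t probe0_timeout_ms(uint32_t rto_ms, unsigned int backoff)
{
	/* min(icsk_rto << icsk_backoff, TCP_RTO_MAX), widened to 64 bits so
	 * realistic backoff values cannot overflow the shift.
	 */
	uint64_t t = (uint64_t)rto_ms << backoff;

	return t < SKETCH_RTO_MAX_MS ? (uint32_t)t : SKETCH_RTO_MAX_MS;
}
/* E.g. rto_ms = 200 and backoff = 0..9 give 200, 400, ..., 102400 ms;
 * backoff = 10 would give 204800 ms and is clamped to 120000 ms.
 */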