/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                           int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int prior_packets = tp->packets_out;

        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

        tp->packets_out += tcp_skb_pcount(skb);
        if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (!before(tcp_wnd_end(tp), tp->snd_nxt))
                return tp->snd_nxt;
        else
                return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first hop
 *    device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct dst_entry *dst = __sk_dst_get(sk);
        int mss = tp->advmss;

        if (dst) {
                unsigned int metric = dst_metric_advmss(dst);

                if (metric < mss) {
                        mss = metric;
                        tp->advmss = mss;
                }
        }

        return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;

        tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

        tp->snd_ssthresh = tcp_current_ssthresh(sk);
        restart_cwnd = min(restart_cwnd, cwnd);

        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_cwnd_used = 0;
}
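/* Worked example for the halving loop above (delta and icsk_rto are both
 * in jiffies): with an RTO worth 200 ms and an idle time of 650 ms, the
 * loop body runs three times (delta becomes 450, 250, then 50), so a
 * cwnd of 40 restarts at 40 >> 3 = 5 segments, never going below
 * restart_cwnd.
 */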
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
                                struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;

        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
                tcp_cwnd_restart(sk, __sk_dst_get(sk));

        tp->lsndtime = now;

        /* If this is a reply sent within ATO of the last received
         * packet, enter pingpong mode.
         */
        if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
                icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
                               __u32 *rcv_wnd, __u32 *window_clamp,
                               int wscale_ok, __u8 *rcv_wscale,
                               __u32 init_rcv_wnd)
{
        unsigned int space = (__space < 0 ? 0 : __space);

        /* If no clamp is set, set the clamp to the max possible scaled window */
        if (*window_clamp == 0)
                (*window_clamp) = (65535 << 14);
        space = min(*window_clamp, space);

        /* Quantize space offering to a multiple of mss if possible. */
        if (space > mss)
                space = (space / mss) * mss;

        /* NOTE: offering an initial window larger than 32767
         * will break some buggy TCP stacks. If the admin tells us
         * it is likely we could be speaking with such a buggy stack
         * we will truncate our initial window offering to 32K-1
         * unless the remote has sent us a window scaling option,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
        if (sysctl_tcp_workaround_signed_windows)
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = space;

        (*rcv_wscale) = 0;
        if (wscale_ok) {
                /* Set window scaling on the max possible window.
                 * See RFC1323 for an explanation of the limit to 14.
                 */
                space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
                        (*rcv_wscale)++;
                }
        }

        /* Set initial window to a value large enough for senders starting
         * with an initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
         * a limit on the initial window when mss is larger than 1460.
         */
        if (mss > (1 << *rcv_wscale)) {
                int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
                if (mss > 1460)
                        init_cwnd =
                        max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
                if (init_rcv_wnd)
                        *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
                else
                        *rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
        }

        /* Set the clamp no higher than max representable value */
        (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
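/* Example of the wscale loop above, assuming *window_clamp does not cap
 * the buffer first: with sysctl_tcp_rmem[2] = 6 MB, space is halved
 * until it fits in 16 bits (6291456 >> 7 = 49152 <= 65535), so
 * rcv_wscale ends up as 7 and each unit of the on-wire window field
 * represents 128 bytes.
 */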
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 cur_win = tcp_receive_window(tp);
        u32 new_win = __tcp_select_window(sk);

        /* Never shrink the offered window */
        if (new_win < cur_win) {
                /* Danger Will Robinson!
                 * Don't update rcv_wup/rcv_wnd here or else
                 * we will not be able to advertise a zero
                 * window in time.  --DaveM
                 *
                 * Relax Will Robinson.
                 */
                new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
        }
        tp->rcv_wnd = new_win;
        tp->rcv_wup = tp->rcv_nxt;

        /* Make sure we do not exceed the maximum possible
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

        /* RFC1323 scaling applied */
        new_win >>= tp->rx_opt.rcv_wscale;

        /* If we advertise zero window, disable fast path. */
        if (new_win == 0)
                tp->pred_flags = 0;

        return new_win;
}
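/* Note on the no-shrink path above: ALIGN() rounds cur_win up to the
 * scale granularity, so with rcv_wscale = 7 a cur_win of 1000 is kept
 * as ALIGN(1000, 128) = 1024, which the final right shift turns into a
 * window field of 8 rather than an accidentally smaller value.
 */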
/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
                TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->ecn_flags = 0;
        if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
                tp->ecn_flags = TCP_ECN_OK;
        }
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
        if (inet_rsk(req)->ecn_ok)
                th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
                                int tcp_header_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->ecn_flags & TCP_ECN_OK) {
                /* Not-retransmitted data segment: set ECT and inject CWR. */
                if (skb->len != tcp_header_len &&
                    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
                                tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
                        /* ACK or retransmitted segment: clear ECT|CE */
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
                        tcp_hdr(skb)->ece = 1;
        }
}
/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum = 0;

        TCP_SKB_CB(skb)->tcp_flags = flags;
        TCP_SKB_CB(skb)->sacked = 0;

        skb_shinfo(skb)->gso_segs = 1;
        skb_shinfo(skb)->gso_size = 0;
        skb_shinfo(skb)->gso_type = 0;

        TCP_SKB_CB(skb)->seq = seq;
        if (flags & (TCPHDR_SYN | TCPHDR_FIN))
                seq++;
        TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
        return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE   (1 << 0)
#define OPTION_TS               (1 << 1)
#define OPTION_MD5              (1 << 2)
#define OPTION_WSCALE           (1 << 3)
#define OPTION_FAST_OPEN_COOKIE (1 << 8)

struct tcp_out_options {
        u16 options;            /* bit field of OPTION_* */
        u16 mss;                /* 0 to disable */
        u8 ws;                  /* window scale, 0 to disable */
        u8 num_sack_blocks;     /* number of SACK blocks to include */
        u8 hash_size;           /* bytes in hash_location */
        __u8 *hash_location;    /* temporary pointer, overloaded */
        __u32 tsval, tsecr;     /* need to include OPTION_TS */
        struct tcp_fastopen_cookie *fastopen_cookie;    /* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
                              struct tcp_out_options *opts)
{
        u16 options = opts->options;    /* mungable copy */

        if (unlikely(OPTION_MD5 & options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                /* overload cookie hash location */
                opts->hash_location = (__u8 *)ptr;
                ptr += 4;
        }

        if (unlikely(opts->mss)) {
                *ptr++ = htonl((TCPOPT_MSS << 24) |
                               (TCPOLEN_MSS << 16) |
                               opts->mss);
        }

        if (likely(OPTION_TS & options)) {
                if (unlikely(OPTION_SACK_ADVERTISE & options)) {
                        *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
                                       (TCPOLEN_SACK_PERM << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                        options &= ~OPTION_SACK_ADVERTISE;
                } else {
                        *ptr++ = htonl((TCPOPT_NOP << 24) |
                                       (TCPOPT_NOP << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                }
                *ptr++ = htonl(opts->tsval);
                *ptr++ = htonl(opts->tsecr);
        }

        if (unlikely(OPTION_SACK_ADVERTISE & options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK_PERM << 8) |
                               TCPOLEN_SACK_PERM);
        }

        if (unlikely(OPTION_WSCALE & options)) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_WINDOW << 16) |
                               (TCPOLEN_WINDOW << 8) |
                               opts->ws);
        }
        if (unlikely(opts->num_sack_blocks)) {
                struct tcp_sack_block *sp = tp->rx_opt.dsack ?
                        tp->duplicate_sack : tp->selective_acks;
                int this_sack;

                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK << 8) |
                               (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
                                                     TCPOLEN_SACK_PERBLOCK)));

                for (this_sack = 0; this_sack < opts->num_sack_blocks;
                     ++this_sack) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }

                tp->rx_opt.dsack = 0;
        }

        if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
                struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

                *ptr++ = htonl((TCPOPT_EXP << 24) |
                               ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
                               TCPOPT_FASTOPEN_MAGIC);

                memcpy(ptr, foc->val, foc->len);
                if ((foc->len & 3) == 2) {
                        u8 *align = ((u8 *)ptr) + foc->len;
                        align[0] = align[1] = TCPOPT_NOP;
                }
                ptr += (foc->len + 3) >> 2;
        }
}
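/* Wire-format example for the writer above: when OPTION_TS and
 * OPTION_SACK_ADVERTISE are both set (a SYN or SYN-ACK), the combined
 * word is 0x0402080a, i.e. the bytes SACK_PERM (kind 4, len 2) folded
 * in front of TIMESTAMP (kind 8, len 10), followed by the two 32-bit
 * timestamp values.
 */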
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
                                    struct tcp_out_options *opts,
                                    struct tcp_md5sig_key **md5)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
        struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (*md5) {
                opts->options |= OPTION_MD5;
                remaining -= TCPOLEN_MD5SIG_ALIGNED;
        }
#else
        *md5 = NULL;
#endif

        /* We always get an MSS option. The option bytes which will be seen in
         * normal data packets (should timestamps be used) must be included in
         * the MSS advertised. But we subtract them from tp->mss_cache so that
         * calculations in tcp_sendmsg are simpler etc. So account for this
         * fact here if necessary. If we don't do this correctly, as a
         * receiver we won't recognize data packets as being full sized when we
         * should, and thus we won't abide by the delayed ACK rules correctly.
         * SACKs don't matter, we never delay an ACK when we have any of those
         * going out.
         */
        opts->mss = tcp_advertise_mss(sk);
        remaining -= TCPOLEN_MSS_ALIGNED;

        if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
                opts->options |= OPTION_TS;
                opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
                opts->tsecr = tp->rx_opt.ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
        if (likely(sysctl_tcp_window_scaling)) {
                opts->ws = tp->rx_opt.rcv_wscale;
                opts->options |= OPTION_WSCALE;
                remaining -= TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(sysctl_tcp_sack)) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!(OPTION_TS & opts->options)))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }

        if (fastopen && fastopen->cookie.len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
                need = (need + 3) & ~3U;        /* Align to 32 bits */
                if (remaining >= need) {
                        opts->options |= OPTION_FAST_OPEN_COOKIE;
                        opts->fastopen_cookie = &fastopen->cookie;
                        remaining -= need;
                        tp->syn_fastopen = 1;
                }
        }

        return MAX_TCP_OPTION_SPACE - remaining;
}
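/* Option-space budget example for a typical SYN built above, assuming
 * MD5 is off and timestamps, window scaling and SACK are all enabled:
 * of MAX_TCP_OPTION_SPACE (40) bytes, MSS takes 4, timestamps 12 and
 * wscale 4 (SACK_PERM rides in the timestamp word for free), leaving
 * 20 bytes -- exactly enough for the experimental Fast Open option
 * with a cookie of up to 16 bytes.
 */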
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
                                       struct request_sock *req,
                                       unsigned int mss, struct sk_buff *skb,
                                       struct tcp_out_options *opts,
                                       struct tcp_md5sig_key **md5,
                                       struct tcp_fastopen_cookie *foc)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
        if (*md5) {
                opts->options |= OPTION_MD5;
                remaining -= TCPOLEN_MD5SIG_ALIGNED;

                /* We can't fit any SACK blocks in a packet with MD5 + TS
                 * options. There was discussion about disabling SACK
                 * rather than TS in order to fit in better with old,
                 * buggy kernels, but that was deemed to be unnecessary.
                 */
                ireq->tstamp_ok &= !ireq->sack_ok;
        }
#else
        *md5 = NULL;
#endif

        /* We always send an MSS option. */
        opts->mss = mss;
        remaining -= TCPOLEN_MSS_ALIGNED;

        if (likely(ireq->wscale_ok)) {
                opts->ws = ireq->rcv_wscale;
                opts->options |= OPTION_WSCALE;
                remaining -= TCPOLEN_WSCALE_ALIGNED;
        }
        if (likely(ireq->tstamp_ok)) {
                opts->options |= OPTION_TS;
                opts->tsval = TCP_SKB_CB(skb)->when;
                opts->tsecr = req->ts_recent;
                remaining -= TCPOLEN_TSTAMP_ALIGNED;
        }
        if (likely(ireq->sack_ok)) {
                opts->options |= OPTION_SACK_ADVERTISE;
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
        if (foc != NULL) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;        /* Align to 32 bits */
                if (remaining >= need) {
                        opts->options |= OPTION_FAST_OPEN_COOKIE;
                        opts->fastopen_cookie = foc;
                        remaining -= need;
                }
        }

        return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
                                            struct tcp_out_options *opts,
                                            struct tcp_md5sig_key **md5)
{
        struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int size = 0;
        unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (unlikely(*md5)) {
                opts->options |= OPTION_MD5;
                size += TCPOLEN_MD5SIG_ALIGNED;
        }
#else
        *md5 = NULL;
#endif

        if (likely(tp->rx_opt.tstamp_ok)) {
                opts->options |= OPTION_TS;
                opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
                opts->tsecr = tp->rx_opt.ts_recent;
                size += TCPOLEN_TSTAMP_ALIGNED;
        }

        eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
        if (unlikely(eff_sacks)) {
                const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
                opts->num_sack_blocks =
                        min_t(unsigned int, eff_sacks,
                              (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
                              TCPOLEN_SACK_PERBLOCK);
                size += TCPOLEN_SACK_BASE_ALIGNED +
                        opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
        }

        return size;
}
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in the tx
 * queues (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
        struct tasklet_struct   tasklet;
        struct list_head        head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
        if ((1 << sk->sk_state) &
            (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
             TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
                tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
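/* Rough lifecycle of the TSQ bits used below: the transmit path sets
 * TSQ_THROTTLED once a flow has enough bytes queued in the qdisc/device
 * layer; when such an skb is freed, tcp_wfree() converts that into
 * TSQ_QUEUED and schedules this cpu's tasklet, which either transmits
 * directly via tcp_tsq_handler() or, if the socket is owned by the
 * user, defers to tcp_release_cb() through TCP_TSQ_DEFERRED.
 */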
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
        struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
        LIST_HEAD(list);
        unsigned long flags;
        struct list_head *q, *n;
        struct tcp_sock *tp;
        struct sock *sk;

        local_irq_save(flags);
        list_splice_init(&tsq->head, &list);
        local_irq_restore(flags);

        list_for_each_safe(q, n, &list) {
                tp = list_entry(q, struct tcp_sock, tsq_node);
                list_del(&tp->tsq_node);

                sk = (struct sock *)tp;
                bh_lock_sock(sk);

                if (!sock_owned_by_user(sk)) {
                        tcp_tsq_handler(sk);
                } else {
                        /* defer the work to tcp_release_cb() */
                        set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
                }
                bh_unlock_sock(sk);

                clear_bit(TSQ_QUEUED, &tp->tsq_flags);
                sk_free(sk);
        }
}
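/* The sk_free() above pairs with tcp_wfree() below keeping one
 * reference in sk_wmem_alloc (it subtracts truesize - 1 rather than
 * truesize), so the socket cannot go away while it sits on a tasklet
 * queue.
 */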
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |           \
                          (1UL << TCP_WRITE_TIMER_DEFERRED) |   \
                          (1UL << TCP_DELACK_TIMER_DEFERRED) |  \
                          (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned long flags, nflags;

        /* perform an atomic operation only if at least one flag is set */
        do {
                flags = tp->tsq_flags;
                if (!(flags & TCP_DEFERRED_ALL))
                        return;
                nflags = flags & ~TCP_DEFERRED_ALL;
        } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

        if (flags & (1UL << TCP_TSQ_DEFERRED))
                tcp_tsq_handler(sk);

        if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
                tcp_write_timer_handler(sk);
                __sock_put(sk);
        }
        if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
                tcp_delack_timer_handler(sk);
                __sock_put(sk);
        }
        if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
                sk->sk_prot->mtu_reduced(sk);
                __sock_put(sk);
        }
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

                INIT_LIST_HEAD(&tsq->head);
                tasklet_init(&tsq->tasklet,
                             tcp_tasklet_func,
                             (unsigned long)tsq);
        }
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
static void tcp_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct tcp_sock *tp = tcp_sk(sk);

        if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
            !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
                unsigned long flags;
                struct tsq_tasklet *tsq;

                /* Keep a ref on socket.
                 * This last ref will be released in tcp_tasklet_func()
                 */
                atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

                /* queue this socket to tasklet queue */
                local_irq_save(flags);
                tsq = &__get_cpu_var(tsq_tasklet);
                list_add(&tp->tsq_node, &tsq->head);
                tasklet_schedule(&tsq->tasklet);
                local_irq_restore(flags);
        } else {
                sock_wfree(skb);
        }
}
/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                            gfp_t gfp_mask)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
        struct tcp_sock *tp;
        struct tcp_skb_cb *tcb;
        struct tcp_out_options opts;
        unsigned int tcp_options_size, tcp_header_size;
        struct tcp_md5sig_key *md5;
        struct tcphdr *th;
        int err;

        BUG_ON(!skb || !tcp_skb_pcount(skb));

        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
        if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);

        if (likely(clone_it)) {
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
                        skb = skb_clone(skb, gfp_mask);
                if (unlikely(!skb))
                        return -ENOBUFS;
        }
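        /* An already-cloned skb still shares its data with the copy kept
         * in the retransmit queue, and the header push below would
         * corrupt that shared data, so pskb_copy() (which duplicates the
         * header portion) is used instead of skb_clone() in that case.
         */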
        inet = inet_sk(sk);
        tp = tcp_sk(sk);
        tcb = TCP_SKB_CB(skb);
        memset(&opts, 0, sizeof(opts));

        if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
                tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
        else
                tcp_options_size = tcp_established_options(sk, skb, &opts,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

        if (tcp_packets_in_flight(tp) == 0) {
                tcp_ca_event(sk, CA_EVENT_TX_START);
                skb->ooo_okay = 1;
        } else
                skb->ooo_okay = 0;

        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);

        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
                          tcp_wfree : sock_wfree;
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);

        /* Build TCP header and checksum it. */
        th = tcp_hdr(skb);
        th->source = inet->inet_sport;
        th->dest = inet->inet_dport;
        th->seq = htonl(tcb->seq);
        th->ack_seq = htonl(tp->rcv_nxt);
        *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
                                      tcb->tcp_flags);

        if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
                /* RFC1323: The window in SYN & SYN/ACK segments
                 * is never scaled.
                 */
                th->window = htons(min(tp->rcv_wnd, 65535U));
        } else {
                th->window = htons(tcp_select_window(sk));
        }
        th->check = 0;
        th->urg_ptr = 0;

        /* The urg_mode check is necessary during a below snd_una win probe */
        if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
                if (before(tp->snd_up, tcb->seq + 0x10000)) {
                        th->urg_ptr = htons(tp->snd_up - tcb->seq);
                        th->urg = 1;
                } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
                        th->urg_ptr = htons(0xFFFF);
                        th->urg = 1;
                }
        }
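        /* On the urgent pointer above: urg_ptr is only 16 bits wide, so
         * when snd_up lies more than 64K beyond this segment's sequence
         * number the best we can do is advertise 0xFFFF, and only while
         * that clamped pointer still lands beyond snd_nxt.
         */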
        tcp_options_write((__be32 *)(th + 1), tp, &opts);
        if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
                TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
        /* Calculate the MD5 hash, as we have all we need now */
        if (md5) {
                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                tp->af_specific->calc_md5_hash(opts.hash_location,
                                               md5, sk, NULL, skb);
        }
#endif

        icsk->icsk_af_ops->send_check(sk, skb);

        if (likely(tcb->tcp_flags & TCPHDR_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, sk);

        if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
                TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
                              tcp_skb_pcount(skb));

        err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
        if (likely(err <= 0))
                return err;

        tcp_enter_cwr(sk, 1);

        return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
}
/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
{
        if (skb->len <= mss_now || !sk_can_gso(sk) ||
            skb->ip_summed == CHECKSUM_NONE) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
                skb_shinfo(skb)->gso_segs = 1;
                skb_shinfo(skb)->gso_size = 0;
                skb_shinfo(skb)->gso_type = 0;
        } else {
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
                                   int decr)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->sacked_out || tcp_is_reno(tp))
                return;

        if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
                tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->packets_out -= decr;

        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                tp->sacked_out -= decr;
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                tp->retrans_out -= decr;
        if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                tp->lost_out -= decr;

        /* Reno case is special. Sigh... */
        if (tcp_is_reno(tp) && decr > 0)
                tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

        tcp_adjust_fackets_out(sk, skb, decr);

        if (tp->lost_skb_hint &&
            before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
            (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
                tp->lost_cnt_hint -= decr;

        tcp_verify_left_out(tp);
}
/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
                 unsigned int mss_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
        int nlen;
        u8 flags;

        if (WARN_ON(len > skb->len))
                return -EINVAL;

        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;

        if (skb_cloned(skb) &&
            skb_is_nonlinear(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
        if (buff == NULL)
                return -ENOMEM; /* We'll just try again later. */

        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->tcp_flags;
        TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
        TCP_SKB_CB(buff)->tcp_flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
                buff->csum = csum_partial_copy_nocheck(skb->data + len,
                                                       skb_put(buff, nsize),
                                                       nsize, 0);

                skb_trim(skb, len);

                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
        } else {
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb_split(skb, buff, len);
        }

        buff->ip_summed = skb->ip_summed;

        /* Looks stupid, but our code really uses the 'when' of skbs,
         * which it never sent before. --ANK
         */
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;

        old_factor = tcp_skb_pcount(skb);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* If this packet has been sent out already, we must
         * adjust the various packet counters.
         */
        if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
                int diff = old_factor - tcp_skb_pcount(skb) -
                           tcp_skb_pcount(buff);

                if (diff)
                        tcp_adjust_pcount(sk, skb, diff);
        }

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}
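/* Pcount example for tcp_fragment() above: splitting a 3 * mss skb
 * (old_factor = 3) at len = mss leaves pcounts of 1 and 2, so
 * diff = 3 - 1 - 2 = 0 and no counters move. A split that is not
 * mss-aligned can change the total (e.g. 2500 bytes with mss 1000
 * split at 1200 goes from 3 to 2 + 2), and tcp_adjust_pcount() then
 * rebalances packets_out and the SACK bookkeeping.
 */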
Miller tcp_set_skb_tso_segs(sk, buff, mss_now); 11051da177e4SLinus Torvalds 11066475be16SDavid S. Miller /* If this packet has been sent out already, we must 11076475be16SDavid S. Miller * adjust the various packet counters. 11086475be16SDavid S. Miller */ 1109cf0b450cSHerbert Xu if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 11106475be16SDavid S. Miller int diff = old_factor - tcp_skb_pcount(skb) - 11116475be16SDavid S. Miller tcp_skb_pcount(buff); 11121da177e4SLinus Torvalds 1113797108d1SIlpo Järvinen if (diff) 1114797108d1SIlpo Järvinen tcp_adjust_pcount(sk, skb, diff); 11151da177e4SLinus Torvalds } 11161da177e4SLinus Torvalds 11171da177e4SLinus Torvalds /* Link BUFF into the send queue. */ 1118f44b5271SDavid S. Miller skb_header_release(buff); 1119fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 11201da177e4SLinus Torvalds 11211da177e4SLinus Torvalds return 0; 11221da177e4SLinus Torvalds } 11231da177e4SLinus Torvalds 11241da177e4SLinus Torvalds /* This is similar to __pskb_pull_tail() (it will go to core/skbuff.c 11251da177e4SLinus Torvalds * eventually). The difference is that pulled data is not copied, but 11261da177e4SLinus Torvalds * immediately discarded. 11271da177e4SLinus Torvalds */ 1128f2911969SHerbert Xu static void __pskb_trim_head(struct sk_buff *skb, int len) 11291da177e4SLinus Torvalds { 11301da177e4SLinus Torvalds int i, k, eat; 11311da177e4SLinus Torvalds 11324fa48bf3SEric Dumazet eat = min_t(int, len, skb_headlen(skb)); 11334fa48bf3SEric Dumazet if (eat) { 11344fa48bf3SEric Dumazet __skb_pull(skb, eat); 11354fa48bf3SEric Dumazet len -= eat; 11364fa48bf3SEric Dumazet if (!len) 11374fa48bf3SEric Dumazet return; 11384fa48bf3SEric Dumazet } 11391da177e4SLinus Torvalds eat = len; 11401da177e4SLinus Torvalds k = 0; 11411da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 11429e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 11439e903e08SEric Dumazet 11449e903e08SEric Dumazet if (size <= eat) { 1145aff65da0SIan Campbell skb_frag_unref(skb, i); 11469e903e08SEric Dumazet eat -= size; 11471da177e4SLinus Torvalds } else { 11481da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 11491da177e4SLinus Torvalds if (eat) { 11501da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 11519e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 11521da177e4SLinus Torvalds eat = 0; 11531da177e4SLinus Torvalds } 11541da177e4SLinus Torvalds k++; 11551da177e4SLinus Torvalds } 11561da177e4SLinus Torvalds } 11571da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 11581da177e4SLinus Torvalds 115927a884dcSArnaldo Carvalho de Melo skb_reset_tail_pointer(skb); 11601da177e4SLinus Torvalds skb->data_len -= len; 11611da177e4SLinus Torvalds skb->len = skb->data_len; 11621da177e4SLinus Torvalds } 11631da177e4SLinus Torvalds 116467edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. 
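* Illustrative walk-through (numbers invented, not from the original source): if the peer has acked the first 1000 bytes of skb, tcp_trim_head(sk, skb, 1000) below advances TCP_SKB_CB(skb)->seq by 1000 and hands the same 1000 bytes of truesize and memory charge back to the socket.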
*/ 11651da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 11661da177e4SLinus Torvalds { 116714bbd6a5SPravin B Shelar if (skb_unclone(skb, GFP_ATOMIC)) 11681da177e4SLinus Torvalds return -ENOMEM; 11691da177e4SLinus Torvalds 11704fa48bf3SEric Dumazet __pskb_trim_head(skb, len); 11711da177e4SLinus Torvalds 11721da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq += len; 117384fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 11741da177e4SLinus Torvalds 11751da177e4SLinus Torvalds skb->truesize -= len; 11761da177e4SLinus Torvalds sk->sk_wmem_queued -= len; 11773ab224beSHideo Aoki sk_mem_uncharge(sk, len); 11781da177e4SLinus Torvalds sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 11791da177e4SLinus Torvalds 11805b35e1e6SNeal Cardwell /* Any change of skb->len requires recalculation of tso factor. */ 11811da177e4SLinus Torvalds if (tcp_skb_pcount(skb) > 1) 11825b35e1e6SNeal Cardwell tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); 11831da177e4SLinus Torvalds 11841da177e4SLinus Torvalds return 0; 11851da177e4SLinus Torvalds } 11861da177e4SLinus Torvalds 11871b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options. */ 11881b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 11895d424d5aSJohn Heffner { 1190cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1191cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 11925d424d5aSJohn Heffner int mss_now; 11935d424d5aSJohn Heffner 11945d424d5aSJohn Heffner /* Calculate base mss without TCP options: 11955d424d5aSJohn Heffner It is MMS_S - sizeof(tcphdr) of rfc1122 11965d424d5aSJohn Heffner */ 11975d424d5aSJohn Heffner mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 11985d424d5aSJohn Heffner 119967469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 120067469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 120167469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 120267469601SEric Dumazet 120367469601SEric Dumazet if (dst && dst_allfrag(dst)) 120467469601SEric Dumazet mss_now -= icsk->icsk_af_ops->net_frag_header_len; 120567469601SEric Dumazet } 120667469601SEric Dumazet 12075d424d5aSJohn Heffner /* Clamp it (mss_clamp does not include tcp options) */ 12085d424d5aSJohn Heffner if (mss_now > tp->rx_opt.mss_clamp) 12095d424d5aSJohn Heffner mss_now = tp->rx_opt.mss_clamp; 12105d424d5aSJohn Heffner 12115d424d5aSJohn Heffner /* Now subtract optional transport overhead */ 12125d424d5aSJohn Heffner mss_now -= icsk->icsk_ext_hdr_len; 12135d424d5aSJohn Heffner 12145d424d5aSJohn Heffner /* Then reserve room for full set of TCP options and 8 bytes of data */ 12155d424d5aSJohn Heffner if (mss_now < 48) 12165d424d5aSJohn Heffner mss_now = 48; 12175d424d5aSJohn Heffner return mss_now; 12185d424d5aSJohn Heffner } 12195d424d5aSJohn Heffner 12201b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here. 
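* Illustrative arithmetic (assumed setup, not from the original source: IPv4, 1500-byte path MTU, no IP options, timestamps negotiated, mss_clamp >= 1460): __tcp_mtu_to_mss() yields 1500 - 20 - 20 = 1460 and tcp_header_len is 20 + 12, so this helper returns 1460 - 12 = 1448.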
*/ 12211b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu) 12221b63edd6SYuchung Cheng { 12231b63edd6SYuchung Cheng /* Subtract TCP options size, not including SACKs */ 12241b63edd6SYuchung Cheng return __tcp_mtu_to_mss(sk, pmtu) - 12251b63edd6SYuchung Cheng (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 12261b63edd6SYuchung Cheng } 12271b63edd6SYuchung Cheng 12285d424d5aSJohn Heffner /* Inverse of above */ 122967469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss) 12305d424d5aSJohn Heffner { 1231cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1232cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 12335d424d5aSJohn Heffner int mtu; 12345d424d5aSJohn Heffner 12355d424d5aSJohn Heffner mtu = mss + 12365d424d5aSJohn Heffner tp->tcp_header_len + 12375d424d5aSJohn Heffner icsk->icsk_ext_hdr_len + 12385d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 12395d424d5aSJohn Heffner 124067469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 124167469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 124267469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 124367469601SEric Dumazet 124467469601SEric Dumazet if (dst && dst_allfrag(dst)) 124567469601SEric Dumazet mtu += icsk->icsk_af_ops->net_frag_header_len; 124667469601SEric Dumazet } 12475d424d5aSJohn Heffner return mtu; 12485d424d5aSJohn Heffner } 12495d424d5aSJohn Heffner 125067edfef7SAndi Kleen /* MTU probing init per socket */ 12515d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk) 12525d424d5aSJohn Heffner { 12535d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 12545d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 12555d424d5aSJohn Heffner 12565d424d5aSJohn Heffner icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 12575d424d5aSJohn Heffner icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 12585d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 12595d424d5aSJohn Heffner icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 12605d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 12615d424d5aSJohn Heffner } 12624bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init); 12635d424d5aSJohn Heffner 12641da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set. 12651da177e4SLinus Torvalds 12661da177e4SLinus Torvalds tp->rx_opt.user_mss is mss set by user via TCP_MAXSEG. It does NOT count 12671da177e4SLinus Torvalds for TCP options, but includes only bare TCP header. 12681da177e4SLinus Torvalds 12691da177e4SLinus Torvalds tp->rx_opt.mss_clamp is mss negotiated at connection setup. 1270caa20d9aSStephen Hemminger It is the minimum of user_mss and the mss received with SYN. 12711da177e4SLinus Torvalds It also does not include TCP options. 12721da177e4SLinus Torvalds 1273d83d8461SArnaldo Carvalho de Melo inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function. 12741da177e4SLinus Torvalds 12751da177e4SLinus Torvalds tp->mss_cache is current effective sending mss, including 12761da177e4SLinus Torvalds all tcp options except for SACKs. It is evaluated, 12771da177e4SLinus Torvalds taking into account current pmtu, but never exceeds 12781da177e4SLinus Torvalds tp->rx_opt.mss_clamp. 12791da177e4SLinus Torvalds 12801da177e4SLinus Torvalds NOTE1. rfc1122 clearly states that advertised MSS 12811da177e4SLinus Torvalds DOES NOT include either tcp or ip options. 
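Illustration (numbers assumed, not from the original text): if user_mss is unset and the peer's SYN advertised MSS 1400, mss_clamp is 1400; with a 1500-byte path MTU the pmtu-derived mss of 1460 is then clamped to 1400 (less any option space) before tcp_sync_mss() below stores it in tp->mss_cache.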
12821da177e4SLinus Torvalds 1283d83d8461SArnaldo Carvalho de Melo NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1284d83d8461SArnaldo Carvalho de Melo are READ ONLY outside this function. --ANK (980731) 12851da177e4SLinus Torvalds */ 12861da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 12871da177e4SLinus Torvalds { 12881da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1289d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 12905d424d5aSJohn Heffner int mss_now; 12911da177e4SLinus Torvalds 12925d424d5aSJohn Heffner if (icsk->icsk_mtup.search_high > pmtu) 12935d424d5aSJohn Heffner icsk->icsk_mtup.search_high = pmtu; 12941da177e4SLinus Torvalds 12955d424d5aSJohn Heffner mss_now = tcp_mtu_to_mss(sk, pmtu); 1296409d22b4SIlpo Järvinen mss_now = tcp_bound_to_half_wnd(tp, mss_now); 12971da177e4SLinus Torvalds 12981da177e4SLinus Torvalds /* And store cached results */ 1299d83d8461SArnaldo Carvalho de Melo icsk->icsk_pmtu_cookie = pmtu; 13005d424d5aSJohn Heffner if (icsk->icsk_mtup.enabled) 13015d424d5aSJohn Heffner mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1302c1b4a7e6SDavid S. Miller tp->mss_cache = mss_now; 13031da177e4SLinus Torvalds 13041da177e4SLinus Torvalds return mss_now; 13051da177e4SLinus Torvalds } 13064bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss); 13071da177e4SLinus Torvalds 13081da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options, 13091da177e4SLinus Torvalds * and even PMTU discovery events into account. 13101da177e4SLinus Torvalds */ 13110c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk) 13121da177e4SLinus Torvalds { 1313cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1314cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 1315c1b4a7e6SDavid S. Miller u32 mss_now; 131695c96174SEric Dumazet unsigned int header_len; 131733ad798cSAdam Langley struct tcp_out_options opts; 131833ad798cSAdam Langley struct tcp_md5sig_key *md5; 13191da177e4SLinus Torvalds 1320c1b4a7e6SDavid S. Miller mss_now = tp->mss_cache; 1321c1b4a7e6SDavid S. Miller 13221da177e4SLinus Torvalds if (dst) { 13231da177e4SLinus Torvalds u32 mtu = dst_mtu(dst); 1324d83d8461SArnaldo Carvalho de Melo if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 13251da177e4SLinus Torvalds mss_now = tcp_sync_mss(sk, mtu); 13261da177e4SLinus Torvalds } 13271da177e4SLinus Torvalds 132833ad798cSAdam Langley header_len = tcp_established_options(sk, NULL, &opts, &md5) + 132933ad798cSAdam Langley sizeof(struct tcphdr); 133033ad798cSAdam Langley /* The mss_cache is sized based on tp->tcp_header_len, which assumes 133133ad798cSAdam Langley * some common options. If this is an odd packet (because we have SACK 133233ad798cSAdam Langley * blocks etc) then our calculated header_len will be different, and 133333ad798cSAdam Langley * we have to adjust mss_now correspondingly */ 133433ad798cSAdam Langley if (header_len != tp->tcp_header_len) { 133533ad798cSAdam Langley int delta = (int) header_len - tp->tcp_header_len; 133633ad798cSAdam Langley mss_now -= delta; 133733ad798cSAdam Langley } 1338cfb6eeb4SYOSHIFUJI Hideaki 13391da177e4SLinus Torvalds return mss_now; 13401da177e4SLinus Torvalds } 13411da177e4SLinus Torvalds 1342a762a980SDavid S. Miller /* Congestion window validation. (RFC2861) */ 13439e412ba7SIlpo Järvinen static void tcp_cwnd_validate(struct sock *sk) 1344a762a980SDavid S. 
Miller { 13459e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1346a762a980SDavid S. Miller 1347d436d686SIlpo Järvinen if (tp->packets_out >= tp->snd_cwnd) { 1348a762a980SDavid S. Miller /* Network is fed fully. */ 1349a762a980SDavid S. Miller tp->snd_cwnd_used = 0; 1350a762a980SDavid S. Miller tp->snd_cwnd_stamp = tcp_time_stamp; 1351a762a980SDavid S. Miller } else { 1352a762a980SDavid S. Miller /* Network starves. */ 1353a762a980SDavid S. Miller if (tp->packets_out > tp->snd_cwnd_used) 1354a762a980SDavid S. Miller tp->snd_cwnd_used = tp->packets_out; 1355a762a980SDavid S. Miller 135615d33c07SDavid S. Miller if (sysctl_tcp_slow_start_after_idle && 135715d33c07SDavid S. Miller (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1358a762a980SDavid S. Miller tcp_cwnd_application_limited(sk); 1359a762a980SDavid S. Miller } 1360a762a980SDavid S. Miller } 1361a762a980SDavid S. Miller 13620e3a4803SIlpo Järvinen /* Returns the portion of skb which can be sent right away without 13630e3a4803SIlpo Järvinen * introducing MSS oddities to segment boundaries. In rare cases where 13640e3a4803SIlpo Järvinen * mss_now != mss_cache, we will request the caller to create a small skb 13650e3a4803SIlpo Järvinen * per input skb which could be mostly avoided here (if desired). 13665ea3a748SIlpo Järvinen * 13675ea3a748SIlpo Järvinen * We explicitly want to create a request for splitting write queue tail 13685ea3a748SIlpo Järvinen * to a small skb for Nagle purposes while avoiding unnecessary modulos, 13695ea3a748SIlpo Järvinen * thus all the complexity (cwnd_len is always an MSS multiple, which we 13705ea3a748SIlpo Järvinen * return whenever allowed by the other factors). Basically we need the 13715ea3a748SIlpo Järvinen * modulo only when the receiver window alone is the limiting factor or 13725ea3a748SIlpo Järvinen * when we would be allowed to send the split-due-to-Nagle skb fully. 13730e3a4803SIlpo Järvinen */ 1374cf533ea5SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, 13751485348dSBen Hutchings unsigned int mss_now, unsigned int max_segs) 1376c1b4a7e6SDavid S. Miller { 1377cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 13781485348dSBen Hutchings u32 needed, window, max_len; 1379c1b4a7e6SDavid S. Miller 138090840defSIlpo Järvinen window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 13811485348dSBen Hutchings max_len = mss_now * max_segs; 13820e3a4803SIlpo Järvinen 13831485348dSBen Hutchings if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 13841485348dSBen Hutchings return max_len; 13850e3a4803SIlpo Järvinen 13865ea3a748SIlpo Järvinen needed = min(skb->len, window); 13875ea3a748SIlpo Järvinen 13881485348dSBen Hutchings if (max_len <= needed) 13891485348dSBen Hutchings return max_len; 13900e3a4803SIlpo Järvinen 13910e3a4803SIlpo Järvinen return needed - needed % mss_now; 1392c1b4a7e6SDavid S. Miller } 1393c1b4a7e6SDavid S. Miller 1394c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the 1395c1b4a7e6SDavid S. Miller * congestion window rules? If so, return how many segments are allowed. 1396c1b4a7e6SDavid S. Miller */ 1397cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1398cf533ea5SEric Dumazet const struct sk_buff *skb) 1399c1b4a7e6SDavid S. Miller { 1400c1b4a7e6SDavid S. Miller u32 in_flight, cwnd; 1401c1b4a7e6SDavid S. Miller 1402c1b4a7e6SDavid S. Miller /* Don't be strict about the congestion window for the final FIN. 
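* Illustration for the normal path below (numbers invented, not from the original): with snd_cwnd = 10 and 7 packets in flight, a quota of 3 segments is returned.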
*/ 14034de075e0SEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 14044de075e0SEric Dumazet tcp_skb_pcount(skb) == 1) 1405c1b4a7e6SDavid S. Miller return 1; 1406c1b4a7e6SDavid S. Miller 1407c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1408c1b4a7e6SDavid S. Miller cwnd = tp->snd_cwnd; 1409c1b4a7e6SDavid S. Miller if (in_flight < cwnd) 1410c1b4a7e6SDavid S. Miller return (cwnd - in_flight); 1411c1b4a7e6SDavid S. Miller 1412c1b4a7e6SDavid S. Miller return 0; 1413c1b4a7e6SDavid S. Miller } 1414c1b4a7e6SDavid S. Miller 1415b595076aSUwe Kleine-König /* Initialize TSO state of a skb. 141667edfef7SAndi Kleen * This must be invoked the first time we consider transmitting 1417c1b4a7e6SDavid S. Miller * SKB onto the wire. 1418c1b4a7e6SDavid S. Miller */ 1419cf533ea5SEric Dumazet static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, 1420056834d9SIlpo Järvinen unsigned int mss_now) 1421c1b4a7e6SDavid S. Miller { 1422c1b4a7e6SDavid S. Miller int tso_segs = tcp_skb_pcount(skb); 1423c1b4a7e6SDavid S. Miller 1424f8269a49SIlpo Järvinen if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1425846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1426c1b4a7e6SDavid S. Miller tso_segs = tcp_skb_pcount(skb); 1427c1b4a7e6SDavid S. Miller } 1428c1b4a7e6SDavid S. Miller return tso_segs; 1429c1b4a7e6SDavid S. Miller } 1430c1b4a7e6SDavid S. Miller 143167edfef7SAndi Kleen /* Minshall's variant of the Nagle send check. */ 1432a2a385d6SEric Dumazet static inline bool tcp_minshall_check(const struct tcp_sock *tp) 1433c1b4a7e6SDavid S. Miller { 1434c1b4a7e6SDavid S. Miller return after(tp->snd_sml, tp->snd_una) && 1435c1b4a7e6SDavid S. Miller !after(tp->snd_sml, tp->snd_nxt); 1436c1b4a7e6SDavid S. Miller } 1437c1b4a7e6SDavid S. Miller 1438a2a385d6SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules: 1439c1b4a7e6SDavid S. Miller * 1. It is full sized. 1440c1b4a7e6SDavid S. Miller * 2. Or it contains FIN. (already checked by caller) 14416d67e9beSFeng King * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1442c1b4a7e6SDavid S. Miller * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1443c1b4a7e6SDavid S. Miller * With Minshall's modification: all sent small packets are ACKed. 1444c1b4a7e6SDavid S. Miller */ 1445a2a385d6SEric Dumazet static inline bool tcp_nagle_check(const struct tcp_sock *tp, 1446c1b4a7e6SDavid S. Miller const struct sk_buff *skb, 144795c96174SEric Dumazet unsigned int mss_now, int nonagle) 1448c1b4a7e6SDavid S. Miller { 1449a02cec21SEric Dumazet return skb->len < mss_now && 1450c1b4a7e6SDavid S. Miller ((nonagle & TCP_NAGLE_CORK) || 1451a02cec21SEric Dumazet (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1452c1b4a7e6SDavid S. Miller } 1453c1b4a7e6SDavid S. Miller 1454a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be 1455c1b4a7e6SDavid S. Miller * sent now. 1456c1b4a7e6SDavid S. Miller */ 1457a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1458c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1459c1b4a7e6SDavid S. Miller { 1460c1b4a7e6SDavid S. Miller /* Nagle rule does not apply to frames, which sit in the middle of the 1461c1b4a7e6SDavid S. Miller * write_queue (they have no chance to get new data). 1462c1b4a7e6SDavid S. Miller * 1463c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 1464c1b4a7e6SDavid S. 
Miller * argument based upon the location of SKB in the send queue. 1465c1b4a7e6SDavid S. Miller */ 1466c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 1467a2a385d6SEric Dumazet return true; 1468c1b4a7e6SDavid S. Miller 1469*9b44190dSYuchung Cheng /* Don't use the nagle rule for urgent data (or for the final FIN). */ 1470*9b44190dSYuchung Cheng if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1471a2a385d6SEric Dumazet return true; 1472c1b4a7e6SDavid S. Miller 1473c1b4a7e6SDavid S. Miller if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1474a2a385d6SEric Dumazet return true; 1475c1b4a7e6SDavid S. Miller 1476a2a385d6SEric Dumazet return false; 1477c1b4a7e6SDavid S. Miller } 1478c1b4a7e6SDavid S. Miller 1479c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 1480a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 1481a2a385d6SEric Dumazet const struct sk_buff *skb, 1482056834d9SIlpo Järvinen unsigned int cur_mss) 1483c1b4a7e6SDavid S. Miller { 1484c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1485c1b4a7e6SDavid S. Miller 1486c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 1487c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1488c1b4a7e6SDavid S. Miller 148990840defSIlpo Järvinen return !after(end_seq, tcp_wnd_end(tp)); 1490c1b4a7e6SDavid S. Miller } 1491c1b4a7e6SDavid S. Miller 1492fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1493c1b4a7e6SDavid S. Miller * should be put on the wire right now. If so, it returns the number of 1494c1b4a7e6SDavid S. Miller * packets allowed by the congestion window. 1495c1b4a7e6SDavid S. Miller */ 1496cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1497c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1498c1b4a7e6SDavid S. Miller { 1499cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1500c1b4a7e6SDavid S. Miller unsigned int cwnd_quota; 1501c1b4a7e6SDavid S. Miller 1502846998aeSDavid S. Miller tcp_init_tso_segs(sk, skb, cur_mss); 1503c1b4a7e6SDavid S. Miller 1504c1b4a7e6SDavid S. Miller if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1505c1b4a7e6SDavid S. Miller return 0; 1506c1b4a7e6SDavid S. Miller 1507c1b4a7e6SDavid S. Miller cwnd_quota = tcp_cwnd_test(tp, skb); 1508056834d9SIlpo Järvinen if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1509c1b4a7e6SDavid S. Miller cwnd_quota = 0; 1510c1b4a7e6SDavid S. Miller 1511c1b4a7e6SDavid S. Miller return cwnd_quota; 1512c1b4a7e6SDavid S. Miller } 1513c1b4a7e6SDavid S. Miller 151467edfef7SAndi Kleen /* Test if sending is allowed right now. */ 1515a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk) 1516c1b4a7e6SDavid S. Miller { 1517cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1518fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1519c1b4a7e6SDavid S. Miller 1520a02cec21SEric Dumazet return skb && 15210c54b85fSIlpo Järvinen tcp_snd_test(sk, skb, tcp_current_mss(sk), 1522c1b4a7e6SDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1523a02cec21SEric Dumazet tp->nonagle : TCP_NAGLE_PUSH)); 1524c1b4a7e6SDavid S. Miller } 1525c1b4a7e6SDavid S. Miller 1526c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1527c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 1528c1b4a7e6SDavid S. 
Miller * tcp_fragment() except that it may make several kinds of assumptions 1529c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 1530c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 1531c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 1532c1b4a7e6SDavid S. Miller */ 1533056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1534c4ead4c5SEric Dumazet unsigned int mss_now, gfp_t gfp) 1535c1b4a7e6SDavid S. Miller { 1536c1b4a7e6SDavid S. Miller struct sk_buff *buff; 1537c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 15389ce01461SIlpo Järvinen u8 flags; 1539c1b4a7e6SDavid S. Miller 1540c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 1541c8ac3774SHerbert Xu if (skb->len != skb->data_len) 1542c8ac3774SHerbert Xu return tcp_fragment(sk, skb, len, mss_now); 1543c1b4a7e6SDavid S. Miller 1544c4ead4c5SEric Dumazet buff = sk_stream_alloc_skb(sk, 0, gfp); 1545c1b4a7e6SDavid S. Miller if (unlikely(buff == NULL)) 1546c1b4a7e6SDavid S. Miller return -ENOMEM; 1547c1b4a7e6SDavid S. Miller 15483ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 15493ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1550b60b49eaSHerbert Xu buff->truesize += nlen; 1551c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 1552c1b4a7e6SDavid S. Miller 1553c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 1554c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1555c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1556c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1557c1b4a7e6SDavid S. Miller 1558c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 15594de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 15604de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 15614de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1562c1b4a7e6SDavid S. Miller 1563c1b4a7e6SDavid S. Miller /* This packet was never sent out yet, so no SACK bits. */ 1564c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->sacked = 0; 1565c1b4a7e6SDavid S. Miller 156684fa7933SPatrick McHardy buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1567c1b4a7e6SDavid S. Miller skb_split(skb, buff, len); 1568c1b4a7e6SDavid S. Miller 1569c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 1570846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss_now); 1571846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, buff, mss_now); 1572c1b4a7e6SDavid S. Miller 1573c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 1574c1b4a7e6SDavid S. Miller skb_header_release(buff); 1575fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 1576c1b4a7e6SDavid S. Miller 1577c1b4a7e6SDavid S. Miller return 0; 1578c1b4a7e6SDavid S. Miller } 1579c1b4a7e6SDavid S. Miller 1580c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount 1581c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 1582c1b4a7e6SDavid S. Miller * 1583c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 1584c1b4a7e6SDavid S. Miller */ 1585a2a385d6SEric Dumazet static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1586c1b4a7e6SDavid S. 
Miller { 15879e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 15886687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1589c1b4a7e6SDavid S. Miller u32 send_win, cong_win, limit, in_flight; 1590ad9f4f50SEric Dumazet int win_divisor; 1591c1b4a7e6SDavid S. Miller 15924de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1593ae8064acSJohn Heffner goto send_now; 1594c1b4a7e6SDavid S. Miller 15956687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 1596ae8064acSJohn Heffner goto send_now; 1597ae8064acSJohn Heffner 1598ae8064acSJohn Heffner /* Defer for less than two clock ticks. */ 1599bd515c3eSIlpo Järvinen if (tp->tso_deferred && 1600a2acde07SIlpo Järvinen (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1601ae8064acSJohn Heffner goto send_now; 1602908a75c1SDavid S. Miller 1603c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1604c1b4a7e6SDavid S. Miller 1605056834d9SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1606c1b4a7e6SDavid S. Miller 160790840defSIlpo Järvinen send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1608c1b4a7e6SDavid S. Miller 1609c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1610c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1611c1b4a7e6SDavid S. Miller 1612c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1613c1b4a7e6SDavid S. Miller 1614ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 16151485348dSBen Hutchings if (limit >= min_t(unsigned int, sk->sk_gso_max_size, 16161485348dSBen Hutchings sk->sk_gso_max_segs * tp->mss_cache)) 1617ae8064acSJohn Heffner goto send_now; 1618ba244fe9SDavid S. Miller 161962ad2761SIlpo Järvinen /* Middle in queue won't get any more data, full sendable already? */ 162062ad2761SIlpo Järvinen if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 162162ad2761SIlpo Järvinen goto send_now; 162262ad2761SIlpo Järvinen 1623ad9f4f50SEric Dumazet win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1624ad9f4f50SEric Dumazet if (win_divisor) { 1625c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1626c1b4a7e6SDavid S. Miller 1627c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1628c1b4a7e6SDavid S. Miller * just use it. 1629c1b4a7e6SDavid S. Miller */ 1630ad9f4f50SEric Dumazet chunk /= win_divisor; 1631c1b4a7e6SDavid S. Miller if (limit >= chunk) 1632ae8064acSJohn Heffner goto send_now; 1633c1b4a7e6SDavid S. Miller } else { 1634c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1635c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1636c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 1637c1b4a7e6SDavid S. Miller * then send now. 1638c1b4a7e6SDavid S. Miller */ 16396b5a5c0dSNeal Cardwell if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1640ae8064acSJohn Heffner goto send_now; 1641c1b4a7e6SDavid S. Miller } 1642c1b4a7e6SDavid S. Miller 1643c1b4a7e6SDavid S. Miller /* Ok, it looks like it is advisable to defer. */ 1644ae8064acSJohn Heffner tp->tso_deferred = 1 | (jiffies << 1); 1645ae8064acSJohn Heffner 1646a2a385d6SEric Dumazet return true; 1647ae8064acSJohn Heffner 1648ae8064acSJohn Heffner send_now: 1649ae8064acSJohn Heffner tp->tso_deferred = 0; 1650a2a385d6SEric Dumazet return false; 1651c1b4a7e6SDavid S. Miller } 1652c1b4a7e6SDavid S. 
Miller 16535d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 165467edfef7SAndi Kleen * MTU probe is regularly attempting to increase the path MTU by 165567edfef7SAndi Kleen * deliberately sending larger packets. This discovers routing 165667edfef7SAndi Kleen * changes resulting in larger path MTUs. 165767edfef7SAndi Kleen * 16585d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available), 16595d424d5aSJohn Heffner * 1 if a probe was sent, 1660056834d9SIlpo Järvinen * -1 otherwise 1661056834d9SIlpo Järvinen */ 16625d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk) 16635d424d5aSJohn Heffner { 16645d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 16655d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 16665d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next; 16675d424d5aSJohn Heffner int len; 16685d424d5aSJohn Heffner int probe_size; 166991cc17c0SIlpo Järvinen int size_needed; 16705d424d5aSJohn Heffner int copy; 16715d424d5aSJohn Heffner int mss_now; 16725d424d5aSJohn Heffner 16735d424d5aSJohn Heffner /* Not currently probing/verifying, 16745d424d5aSJohn Heffner * not in recovery, 16755d424d5aSJohn Heffner * have enough cwnd, and 16765d424d5aSJohn Heffner * not SACKing (the variable headers throw things off) */ 16775d424d5aSJohn Heffner if (!icsk->icsk_mtup.enabled || 16785d424d5aSJohn Heffner icsk->icsk_mtup.probe_size || 16795d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 16805d424d5aSJohn Heffner tp->snd_cwnd < 11 || 1681cabeccbdSIlpo Järvinen tp->rx_opt.num_sacks || tp->rx_opt.dsack) 16825d424d5aSJohn Heffner return -1; 16835d424d5aSJohn Heffner 16845d424d5aSJohn Heffner /* Very simple search strategy: just double the MSS. */ 16850c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk); 16865d424d5aSJohn Heffner probe_size = 2 * tp->mss_cache; 168791cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 16885d424d5aSJohn Heffner if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 16895d424d5aSJohn Heffner /* TODO: set timer for probe_converge_event */ 16905d424d5aSJohn Heffner return -1; 16915d424d5aSJohn Heffner } 16925d424d5aSJohn Heffner 16935d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */ 16947f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed) 16955d424d5aSJohn Heffner return -1; 16965d424d5aSJohn Heffner 169791cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed) 16985d424d5aSJohn Heffner return -1; 169990840defSIlpo Järvinen if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 17005d424d5aSJohn Heffner return 0; 17015d424d5aSJohn Heffner 1702d67c58e9SIlpo Järvinen /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1703d67c58e9SIlpo Järvinen if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1704d67c58e9SIlpo Järvinen if (!tcp_packets_in_flight(tp)) 17055d424d5aSJohn Heffner return -1; 17065d424d5aSJohn Heffner else 17075d424d5aSJohn Heffner return 0; 17085d424d5aSJohn Heffner } 17095d424d5aSJohn Heffner 17105d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */ 17115d424d5aSJohn Heffner if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 17125d424d5aSJohn Heffner return -1; 17133ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 17143ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 17155d424d5aSJohn Heffner 1716fe067e8aSDavid S. 
Miller skb = tcp_send_head(sk); 17175d424d5aSJohn Heffner 17185d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 17195d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 17204de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 17215d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 17225d424d5aSJohn Heffner nskb->csum = 0; 172384fa7933SPatrick McHardy nskb->ip_summed = skb->ip_summed; 17245d424d5aSJohn Heffner 172550c4817eSIlpo Järvinen tcp_insert_write_queue_before(nskb, skb, sk); 172650c4817eSIlpo Järvinen 17275d424d5aSJohn Heffner len = 0; 1728234b6860SIlpo Järvinen tcp_for_write_queue_from_safe(skb, next, sk) { 17295d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 17305d424d5aSJohn Heffner if (nskb->ip_summed) 17315d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 17325d424d5aSJohn Heffner else 17335d424d5aSJohn Heffner nskb->csum = skb_copy_and_csum_bits(skb, 0, 1734056834d9SIlpo Järvinen skb_put(nskb, copy), 1735056834d9SIlpo Järvinen copy, nskb->csum); 17365d424d5aSJohn Heffner 17375d424d5aSJohn Heffner if (skb->len <= copy) { 17385d424d5aSJohn Heffner /* We've eaten all the data from this skb. 17395d424d5aSJohn Heffner * Throw it away. */ 17404de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1741fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 17423ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 17435d424d5aSJohn Heffner } else { 17444de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 1745a3433f35SChangli Gao ~(TCPHDR_FIN|TCPHDR_PSH); 17465d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 17475d424d5aSJohn Heffner skb_pull(skb, copy); 174884fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 1749056834d9SIlpo Järvinen skb->csum = csum_partial(skb->data, 1750056834d9SIlpo Järvinen skb->len, 0); 17515d424d5aSJohn Heffner } else { 17525d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 17535d424d5aSJohn Heffner tcp_set_skb_tso_segs(sk, skb, mss_now); 17545d424d5aSJohn Heffner } 17555d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 17565d424d5aSJohn Heffner } 17575d424d5aSJohn Heffner 17585d424d5aSJohn Heffner len += copy; 1759234b6860SIlpo Järvinen 1760234b6860SIlpo Järvinen if (len >= probe_size) 1761234b6860SIlpo Järvinen break; 17625d424d5aSJohn Heffner } 17635d424d5aSJohn Heffner tcp_init_tso_segs(sk, nskb, nskb->len); 17645d424d5aSJohn Heffner 17655d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 17665d424d5aSJohn Heffner * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 17675d424d5aSJohn Heffner TCP_SKB_CB(nskb)->when = tcp_time_stamp; 17685d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 17695d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 17705d424d5aSJohn Heffner * effectively two packets. 
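* (The probe built above carries probe_size, i.e. two MSS worth of data, yet is accounted as a single packet, so cwnd is reduced by one to keep the in-flight accounting honest.)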
*/ 17715d424d5aSJohn Heffner tp->snd_cwnd--; 177266f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, nskb); 17735d424d5aSJohn Heffner 17745d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 17750e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 17760e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 17775d424d5aSJohn Heffner 17785d424d5aSJohn Heffner return 1; 17795d424d5aSJohn Heffner } 17805d424d5aSJohn Heffner 17815d424d5aSJohn Heffner return -1; 17825d424d5aSJohn Heffner } 17835d424d5aSJohn Heffner 17841da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 17851da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote 17861da177e4SLinus Torvalds * window for us. 17871da177e4SLinus Torvalds * 1788f8269a49SIlpo Järvinen * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1789f8269a49SIlpo Järvinen * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1790f8269a49SIlpo Järvinen * account rare use of URG, this is not a big flaw. 1791f8269a49SIlpo Järvinen * 17926ba8a3b1SNandita Dukkipati * Send at most one packet when push_one > 0. Temporarily ignore 17936ba8a3b1SNandita Dukkipati * cwnd limit to force at most one packet out when push_one == 2. 17946ba8a3b1SNandita Dukkipati 1795a2a385d6SEric Dumazet * Returns true if no segments are in flight and we have queued segments, 1796a2a385d6SEric Dumazet * but cannot send anything now because of SWS or another problem. 17971da177e4SLinus Torvalds */ 1798a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1799d5dd9175SIlpo Järvinen int push_one, gfp_t gfp) 18001da177e4SLinus Torvalds { 18011da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 180292df7b51SDavid S. Miller struct sk_buff *skb; 1803c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 1804c1b4a7e6SDavid S. Miller int cwnd_quota; 18055d424d5aSJohn Heffner int result; 18061da177e4SLinus Torvalds 1807c1b4a7e6SDavid S. Miller sent_pkts = 0; 18085d424d5aSJohn Heffner 1809d5dd9175SIlpo Järvinen if (!push_one) { 18105d424d5aSJohn Heffner /* Do MTU probing. */ 1811d5dd9175SIlpo Järvinen result = tcp_mtu_probe(sk); 1812d5dd9175SIlpo Järvinen if (!result) { 1813a2a385d6SEric Dumazet return false; 18145d424d5aSJohn Heffner } else if (result > 0) { 18155d424d5aSJohn Heffner sent_pkts = 1; 18165d424d5aSJohn Heffner } 1817d5dd9175SIlpo Järvinen } 18185d424d5aSJohn Heffner 1819fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 1820c8ac3774SHerbert Xu unsigned int limit; 1821c8ac3774SHerbert Xu 182246d3ceabSEric Dumazet 1823b68e9f85SHerbert Xu tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1824c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 1825c1b4a7e6SDavid S. Miller 1826ec342325SAndrew Vagin if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) 1827ec342325SAndrew Vagin goto repair; /* Skip network transmission */ 1828ec342325SAndrew Vagin 1829b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 18306ba8a3b1SNandita Dukkipati if (!cwnd_quota) { 18316ba8a3b1SNandita Dukkipati if (push_one == 2) 18326ba8a3b1SNandita Dukkipati /* Force out a loss probe pkt. 
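* (push_one == 2 is the loss-probe case: grant a single segment of quota even though the congestion window is exhausted.)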
*/ 18336ba8a3b1SNandita Dukkipati cwnd_quota = 1; 18346ba8a3b1SNandita Dukkipati else 1835b68e9f85SHerbert Xu break; 18366ba8a3b1SNandita Dukkipati } 1837b68e9f85SHerbert Xu 1838b68e9f85SHerbert Xu if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1839b68e9f85SHerbert Xu break; 1840b68e9f85SHerbert Xu 1841c1b4a7e6SDavid S. Miller if (tso_segs == 1) { 1842aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1843aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1844aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 1845aa93466bSDavid S. Miller break; 1846c1b4a7e6SDavid S. Miller } else { 1847d5dd9175SIlpo Järvinen if (!push_one && tcp_tso_should_defer(sk, skb)) 1848aa93466bSDavid S. Miller break; 1849c1b4a7e6SDavid S. Miller } 1850aa93466bSDavid S. Miller 185146d3ceabSEric Dumazet /* TSQ: sk_wmem_alloc accounts skb truesize, 185246d3ceabSEric Dumazet * including skb overhead. But that's OK. 185346d3ceabSEric Dumazet */ 185446d3ceabSEric Dumazet if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { 185546d3ceabSEric Dumazet set_bit(TSQ_THROTTLED, &tp->tsq_flags); 185646d3ceabSEric Dumazet break; 185746d3ceabSEric Dumazet } 1858c8ac3774SHerbert Xu limit = mss_now; 1859f8269a49SIlpo Järvinen if (tso_segs > 1 && !tcp_urg_mode(tp)) 18600e3a4803SIlpo Järvinen limit = tcp_mss_split_point(sk, skb, mss_now, 18611485348dSBen Hutchings min_t(unsigned int, 18621485348dSBen Hutchings cwnd_quota, 18631485348dSBen Hutchings sk->sk_gso_max_segs)); 1864c8ac3774SHerbert Xu 1865c8ac3774SHerbert Xu if (skb->len > limit && 1866c4ead4c5SEric Dumazet unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 18671da177e4SLinus Torvalds break; 18681da177e4SLinus Torvalds 18691da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 1870c1b4a7e6SDavid S. Miller 1871d5dd9175SIlpo Järvinen if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 18721da177e4SLinus Torvalds break; 18731da177e4SLinus Torvalds 1874ec342325SAndrew Vagin repair: 18751da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 18761da177e4SLinus Torvalds * This call will increment packets_out. 18771da177e4SLinus Torvalds */ 187866f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 18791da177e4SLinus Torvalds 18801da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 1881a262f0cdSNandita Dukkipati sent_pkts += tcp_skb_pcount(skb); 1882d5dd9175SIlpo Järvinen 1883d5dd9175SIlpo Järvinen if (push_one) 1884d5dd9175SIlpo Järvinen break; 18851da177e4SLinus Torvalds } 18861da177e4SLinus Torvalds 1887aa93466bSDavid S. Miller if (likely(sent_pkts)) { 1888684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 1889684bad11SYuchung Cheng tp->prr_out += sent_pkts; 18906ba8a3b1SNandita Dukkipati 18916ba8a3b1SNandita Dukkipati /* Send one loss probe per tail loss episode. 
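* (Skipped when push_one == 2, i.e. when this transmission was itself triggered by the loss-probe timer.)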
*/ 18926ba8a3b1SNandita Dukkipati if (push_one != 2) 18936ba8a3b1SNandita Dukkipati tcp_schedule_loss_probe(sk); 18949e412ba7SIlpo Järvinen tcp_cwnd_validate(sk); 1895a2a385d6SEric Dumazet return false; 18961da177e4SLinus Torvalds } 18976ba8a3b1SNandita Dukkipati return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 18986ba8a3b1SNandita Dukkipati } 18996ba8a3b1SNandita Dukkipati 19006ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk) 19016ba8a3b1SNandita Dukkipati { 19026ba8a3b1SNandita Dukkipati struct inet_connection_sock *icsk = inet_csk(sk); 19036ba8a3b1SNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 19046ba8a3b1SNandita Dukkipati u32 timeout, tlp_time_stamp, rto_time_stamp; 19056ba8a3b1SNandita Dukkipati u32 rtt = tp->srtt >> 3; 19066ba8a3b1SNandita Dukkipati 19076ba8a3b1SNandita Dukkipati if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) 19086ba8a3b1SNandita Dukkipati return false; 19096ba8a3b1SNandita Dukkipati /* No consecutive loss probes. */ 19106ba8a3b1SNandita Dukkipati if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 19116ba8a3b1SNandita Dukkipati tcp_rearm_rto(sk); 19126ba8a3b1SNandita Dukkipati return false; 19136ba8a3b1SNandita Dukkipati } 19146ba8a3b1SNandita Dukkipati /* Don't do any loss probe on a Fast Open connection before 3WHS 19156ba8a3b1SNandita Dukkipati * finishes. 19166ba8a3b1SNandita Dukkipati */ 19176ba8a3b1SNandita Dukkipati if (sk->sk_state == TCP_SYN_RECV) 19186ba8a3b1SNandita Dukkipati return false; 19196ba8a3b1SNandita Dukkipati 19206ba8a3b1SNandita Dukkipati /* TLP is only scheduled when next timer event is RTO. */ 19216ba8a3b1SNandita Dukkipati if (icsk->icsk_pending != ICSK_TIME_RETRANS) 19226ba8a3b1SNandita Dukkipati return false; 19236ba8a3b1SNandita Dukkipati 19246ba8a3b1SNandita Dukkipati /* Schedule a loss probe in 2*RTT for SACK capable connections 19256ba8a3b1SNandita Dukkipati * in Open state, that are either limited by cwnd or application. 19266ba8a3b1SNandita Dukkipati */ 19276ba8a3b1SNandita Dukkipati if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || 19286ba8a3b1SNandita Dukkipati !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 19296ba8a3b1SNandita Dukkipati return false; 19306ba8a3b1SNandita Dukkipati 19316ba8a3b1SNandita Dukkipati if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 19326ba8a3b1SNandita Dukkipati tcp_send_head(sk)) 19336ba8a3b1SNandita Dukkipati return false; 19346ba8a3b1SNandita Dukkipati 19356ba8a3b1SNandita Dukkipati /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 19366ba8a3b1SNandita Dukkipati * for delayed ack when there's one outstanding packet. 19376ba8a3b1SNandita Dukkipati */ 19386ba8a3b1SNandita Dukkipati timeout = rtt << 1; 19396ba8a3b1SNandita Dukkipati if (tp->packets_out == 1) 19406ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, 19416ba8a3b1SNandita Dukkipati (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 19426ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 19436ba8a3b1SNandita Dukkipati 19446ba8a3b1SNandita Dukkipati /* If RTO is shorter, just schedule TLP in its place. 
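* Worked example for the timeout computed above (illustrative values, assuming TCP_DELACK_MAX of 200 ms): with srtt >> 3 = 40 ms the base timeout is 2 * 40 = 80 ms; with exactly one packet outstanding it becomes max(80, 40 + 20 + 200) = 260 ms.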
*/ 19456ba8a3b1SNandita Dukkipati tlp_time_stamp = tcp_time_stamp + timeout; 19466ba8a3b1SNandita Dukkipati rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 19476ba8a3b1SNandita Dukkipati if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 19486ba8a3b1SNandita Dukkipati s32 delta = rto_time_stamp - tcp_time_stamp; 19496ba8a3b1SNandita Dukkipati if (delta > 0) 19506ba8a3b1SNandita Dukkipati timeout = delta; 19516ba8a3b1SNandita Dukkipati } 19526ba8a3b1SNandita Dukkipati 19536ba8a3b1SNandita Dukkipati inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 19546ba8a3b1SNandita Dukkipati TCP_RTO_MAX); 19556ba8a3b1SNandita Dukkipati return true; 19566ba8a3b1SNandita Dukkipati } 19576ba8a3b1SNandita Dukkipati 19586ba8a3b1SNandita Dukkipati /* When probe timeout (PTO) fires, send a new segment if one exists, else 19596ba8a3b1SNandita Dukkipati * retransmit the last segment. 19606ba8a3b1SNandita Dukkipati */ 19616ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk) 19626ba8a3b1SNandita Dukkipati { 19639b717a8dSNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 19646ba8a3b1SNandita Dukkipati struct sk_buff *skb; 19656ba8a3b1SNandita Dukkipati int pcount; 19666ba8a3b1SNandita Dukkipati int mss = tcp_current_mss(sk); 19676ba8a3b1SNandita Dukkipati int err = -1; 19686ba8a3b1SNandita Dukkipati 19696ba8a3b1SNandita Dukkipati if (tcp_send_head(sk) != NULL) { 19706ba8a3b1SNandita Dukkipati err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 19716ba8a3b1SNandita Dukkipati goto rearm_timer; 19726ba8a3b1SNandita Dukkipati } 19736ba8a3b1SNandita Dukkipati 19749b717a8dSNandita Dukkipati /* At most one outstanding TLP retransmission. */ 19759b717a8dSNandita Dukkipati if (tp->tlp_high_seq) 19769b717a8dSNandita Dukkipati goto rearm_timer; 19779b717a8dSNandita Dukkipati 19786ba8a3b1SNandita Dukkipati /* Retransmit last segment. */ 19796ba8a3b1SNandita Dukkipati skb = tcp_write_queue_tail(sk); 19806ba8a3b1SNandita Dukkipati if (WARN_ON(!skb)) 19816ba8a3b1SNandita Dukkipati goto rearm_timer; 19826ba8a3b1SNandita Dukkipati 19836ba8a3b1SNandita Dukkipati pcount = tcp_skb_pcount(skb); 19846ba8a3b1SNandita Dukkipati if (WARN_ON(!pcount)) 19856ba8a3b1SNandita Dukkipati goto rearm_timer; 19866ba8a3b1SNandita Dukkipati 19876ba8a3b1SNandita Dukkipati if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 19886ba8a3b1SNandita Dukkipati if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 19896ba8a3b1SNandita Dukkipati goto rearm_timer; 19906ba8a3b1SNandita Dukkipati skb = tcp_write_queue_tail(sk); 19916ba8a3b1SNandita Dukkipati } 19926ba8a3b1SNandita Dukkipati 19936ba8a3b1SNandita Dukkipati if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 19946ba8a3b1SNandita Dukkipati goto rearm_timer; 19956ba8a3b1SNandita Dukkipati 19966ba8a3b1SNandita Dukkipati /* Probe with zero data doesn't trigger fast recovery. */ 19976ba8a3b1SNandita Dukkipati if (skb->len > 0) 19986ba8a3b1SNandita Dukkipati err = __tcp_retransmit_skb(sk, skb); 19996ba8a3b1SNandita Dukkipati 20009b717a8dSNandita Dukkipati /* Record snd_nxt for loss detection. 
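* (A later ACK covering tlp_high_seq lets the ACK processing decide whether the episode ended with the probe itself repairing the tail loss.)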
*/ 20019b717a8dSNandita Dukkipati if (likely(!err)) 20029b717a8dSNandita Dukkipati tp->tlp_high_seq = tp->snd_nxt; 20039b717a8dSNandita Dukkipati 20046ba8a3b1SNandita Dukkipati rearm_timer: 20056ba8a3b1SNandita Dukkipati inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 20066ba8a3b1SNandita Dukkipati inet_csk(sk)->icsk_rto, 20076ba8a3b1SNandita Dukkipati TCP_RTO_MAX); 20086ba8a3b1SNandita Dukkipati 20096ba8a3b1SNandita Dukkipati if (likely(!err)) 20106ba8a3b1SNandita Dukkipati NET_INC_STATS_BH(sock_net(sk), 20116ba8a3b1SNandita Dukkipati LINUX_MIB_TCPLOSSPROBES); 20126ba8a3b1SNandita Dukkipati return; 20131da177e4SLinus Torvalds } 20141da177e4SLinus Torvalds 2015a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 2016a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 2017a762a980SDavid S. Miller * The socket must be locked by the caller. 2018a762a980SDavid S. Miller */ 20199e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 20209e412ba7SIlpo Järvinen int nonagle) 2021a762a980SDavid S. Miller { 2022726e07a8SIlpo Järvinen /* If we are closed, the bytes will have to remain here. 2023726e07a8SIlpo Järvinen * In time closedown will finish, we empty the write queue and 2024726e07a8SIlpo Järvinen * all will be happy. 2025726e07a8SIlpo Järvinen */ 2026726e07a8SIlpo Järvinen if (unlikely(sk->sk_state == TCP_CLOSE)) 2027726e07a8SIlpo Järvinen return; 2028726e07a8SIlpo Järvinen 202999a1dec7SMel Gorman if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 203099a1dec7SMel Gorman sk_gfp_atomic(sk, GFP_ATOMIC))) 20319e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 2032a762a980SDavid S. Miller } 2033a762a980SDavid S. Miller 2034c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 2035c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 2036c1b4a7e6SDavid S. Miller */ 2037c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 2038c1b4a7e6SDavid S. Miller { 2039fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 2040c1b4a7e6SDavid S. Miller 2041c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 2042c1b4a7e6SDavid S. Miller 2043d5dd9175SIlpo Järvinen tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2044c1b4a7e6SDavid S. Miller } 2045c1b4a7e6SDavid S. Miller 20461da177e4SLinus Torvalds /* This function returns the amount that we can raise the 20471da177e4SLinus Torvalds * usable window based on the following constraints 20481da177e4SLinus Torvalds * 20491da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 20501da177e4SLinus Torvalds * 2. We limit memory per socket 20511da177e4SLinus Torvalds * 20521da177e4SLinus Torvalds * RFC 1122: 20531da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 20541da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 20551da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 20561da177e4SLinus Torvalds * 20571da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 20581da177e4SLinus Torvalds * it at least MSS bytes. 20591da177e4SLinus Torvalds * 20601da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 20611da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 
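* (Invented example of the rounding performed near the bottom of this function: with mss = 1460 and 10000 bytes of free space, the no-window-scaling branch offers (10000 / 1460) * 1460 = 8760 bytes.)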
20621da177e4SLinus Torvalds * 20631da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 20641da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 20651da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 20661da177e4SLinus Torvalds * window to always advance by a single byte. 20671da177e4SLinus Torvalds * 20681da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 20691da177e4SLinus Torvalds * then this will not be a problem. 20701da177e4SLinus Torvalds * 20711da177e4SLinus Torvalds * BSD seems to make the following compromise: 20721da177e4SLinus Torvalds * 20731da177e4SLinus Torvalds * If the free space is less than 1/4 of the maximum 20741da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 20751da177e4SLinus Torvalds * then set the window to 0. 20761da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 20771da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 20781da177e4SLinus Torvalds * and from being larger than the largest representable value. 20791da177e4SLinus Torvalds * 20801da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 20811da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 20821da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 20831da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 20841da177e4SLinus Torvalds * because the pipeline is full. 20851da177e4SLinus Torvalds * 20861da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 20871da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 20881da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 20891da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 20901da177e4SLinus Torvalds * of having a fixed window size at almost all times. 20911da177e4SLinus Torvalds * 20921da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 20931da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 20941da177e4SLinus Torvalds * 20951da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 20961da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 20971da177e4SLinus Torvalds */ 20981da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 20991da177e4SLinus Torvalds { 2100463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 21011da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2102caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 21031da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 21041da177e4SLinus Torvalds * of peer's MSS is better for performance. It's more correct 21051da177e4SLinus Torvalds * but may be worse for performance because of rcv_mss 21061da177e4SLinus Torvalds * fluctuations. 
--SAW 1998/11/1 21071da177e4SLinus Torvalds */ 2108463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss; 21091da177e4SLinus Torvalds int free_space = tcp_space(sk); 21101da177e4SLinus Torvalds int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 21111da177e4SLinus Torvalds int window; 21121da177e4SLinus Torvalds 21131da177e4SLinus Torvalds if (mss > full_space) 21141da177e4SLinus Torvalds mss = full_space; 21151da177e4SLinus Torvalds 2116b92edbe0SEric Dumazet if (free_space < (full_space >> 1)) { 2117463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0; 21181da177e4SLinus Torvalds 2119180d8cd9SGlauber Costa if (sk_under_memory_pressure(sk)) 2120056834d9SIlpo Järvinen tp->rcv_ssthresh = min(tp->rcv_ssthresh, 2121056834d9SIlpo Järvinen 4U * tp->advmss); 21221da177e4SLinus Torvalds 21231da177e4SLinus Torvalds if (free_space < mss) 21241da177e4SLinus Torvalds return 0; 21251da177e4SLinus Torvalds } 21261da177e4SLinus Torvalds 21271da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh) 21281da177e4SLinus Torvalds free_space = tp->rcv_ssthresh; 21291da177e4SLinus Torvalds 21301da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the 21311da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway. 21321da177e4SLinus Torvalds */ 21331da177e4SLinus Torvalds window = tp->rcv_wnd; 21341da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) { 21351da177e4SLinus Torvalds window = free_space; 21361da177e4SLinus Torvalds 21371da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away. 21381da177e4SLinus Torvalds * Important case: prevent zero window announcement if 21391da177e4SLinus Torvalds * 1<<rcv_wscale > mss. 21401da177e4SLinus Torvalds */ 21411da177e4SLinus Torvalds if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 21421da177e4SLinus Torvalds window = (((window >> tp->rx_opt.rcv_wscale) + 1) 21431da177e4SLinus Torvalds << tp->rx_opt.rcv_wscale); 21441da177e4SLinus Torvalds } else { 21451da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss. 21461da177e4SLinus Torvalds * Window clamp already applied above. 21471da177e4SLinus Torvalds * If our current window offering is within 1 mss of the 21481da177e4SLinus Torvalds * free space we just keep it. This prevents the divide 21491da177e4SLinus Torvalds * and multiply from happening most of the time. 21501da177e4SLinus Torvalds * We also don't do any window rounding when the free space 21511da177e4SLinus Torvalds * is too small. 21521da177e4SLinus Torvalds */ 21531da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 21541da177e4SLinus Torvalds window = (free_space / mss) * mss; 215584565070SJohn Heffner else if (mss == full_space && 2156b92edbe0SEric Dumazet free_space > window + (full_space >> 1)) 215784565070SJohn Heffner window = free_space; 21581da177e4SLinus Torvalds } 21591da177e4SLinus Torvalds 21601da177e4SLinus Torvalds return window; 21611da177e4SLinus Torvalds } 21621da177e4SLinus Torvalds 21634a17fc3aSIlpo Järvinen /* Collapses two adjacent SKBs during retransmission. */ 21644a17fc3aSIlpo Järvinen static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 21651da177e4SLinus Torvalds { 21661da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2167fe067e8aSDavid S. 
Miller struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 2168058dc334SIlpo Järvinen int skb_size, next_skb_size; 21691da177e4SLinus Torvalds 2170058dc334SIlpo Järvinen skb_size = skb->len; 2171058dc334SIlpo Järvinen next_skb_size = next_skb->len; 21721da177e4SLinus Torvalds 2173058dc334SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 21741da177e4SLinus Torvalds 21756859d494SIlpo Järvinen tcp_highest_sack_combine(sk, next_skb, skb); 2176a6963a6bSIlpo Järvinen 2177fe067e8aSDavid S. Miller tcp_unlink_write_queue(next_skb, sk); 21781da177e4SLinus Torvalds 2179058dc334SIlpo Järvinen skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 21801a4e2d09SArnaldo Carvalho de Melo next_skb_size); 21811da177e4SLinus Torvalds 218252d570aaSJarek Poplawski if (next_skb->ip_summed == CHECKSUM_PARTIAL) 218352d570aaSJarek Poplawski skb->ip_summed = CHECKSUM_PARTIAL; 21841da177e4SLinus Torvalds 218584fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 21861da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 21871da177e4SLinus Torvalds 21881da177e4SLinus Torvalds /* Update sequence range on original skb. */ 21891da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 21901da177e4SLinus Torvalds 2191e6c7d085SIlpo Järvinen /* Merge over control information. This moves PSH/FIN etc. over */ 21924de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 21931da177e4SLinus Torvalds 21941da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 21951da177e4SLinus Torvalds * packet counting does not break. 21961da177e4SLinus Torvalds */ 21974828e7f4SIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 2198b7689205SIlpo Järvinen 2199b7689205SIlpo Järvinen /* changed transmit queue under us so clear hints */ 2200ef9da47cSIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 2201ef9da47cSIlpo Järvinen if (next_skb == tp->retransmit_skb_hint) 2202ef9da47cSIlpo Järvinen tp->retransmit_skb_hint = skb; 2203b7689205SIlpo Järvinen 2204797108d1SIlpo Järvinen tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2205797108d1SIlpo Järvinen 22063ab224beSHideo Aoki sk_wmem_free_skb(sk, next_skb); 22071da177e4SLinus Torvalds } 22081da177e4SLinus Torvalds 220967edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */ 2210a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 22114a17fc3aSIlpo Järvinen { 22124a17fc3aSIlpo Järvinen if (tcp_skb_pcount(skb) > 1) 2213a2a385d6SEric Dumazet return false; 22144a17fc3aSIlpo Järvinen /* TODO: SACK collapsing could be used to remove this condition */ 22154a17fc3aSIlpo Järvinen if (skb_shinfo(skb)->nr_frags != 0) 2216a2a385d6SEric Dumazet return false; 22174a17fc3aSIlpo Järvinen if (skb_cloned(skb)) 2218a2a385d6SEric Dumazet return false; 22194a17fc3aSIlpo Järvinen if (skb == tcp_send_head(sk)) 2220a2a385d6SEric Dumazet return false; 22214a17fc3aSIlpo Järvinen /* Some heuristics for collapsing over SACK'd could be invented */ 22224a17fc3aSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2223a2a385d6SEric Dumazet return false; 22244a17fc3aSIlpo Järvinen 2225a2a385d6SEric Dumazet return true; 22264a17fc3aSIlpo Järvinen } 22274a17fc3aSIlpo Järvinen 222867edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create 222967edfef7SAndi Kleen * fewer packets on the wire. This is only done on retransmission.
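 *
 * Illustrative walk-through (assumed numbers, not from the source):
 * with space = 1448 (one MSS) and three 400-byte skbs A, B and C
 * eligible on the queue, A stays as the target "to" while B and C are
 * each merged into it by tcp_collapse_retrans(), so one 1200-byte
 * segment goes out instead of three small ones.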
223067edfef7SAndi Kleen */ 22314a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 22324a17fc3aSIlpo Järvinen int space) 22334a17fc3aSIlpo Järvinen { 22344a17fc3aSIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 22354a17fc3aSIlpo Järvinen struct sk_buff *skb = to, *tmp; 2236a2a385d6SEric Dumazet bool first = true; 22374a17fc3aSIlpo Järvinen 22384a17fc3aSIlpo Järvinen if (!sysctl_tcp_retrans_collapse) 22394a17fc3aSIlpo Järvinen return; 22404de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 22414a17fc3aSIlpo Järvinen return; 22424a17fc3aSIlpo Järvinen 22434a17fc3aSIlpo Järvinen tcp_for_write_queue_from_safe(skb, tmp, sk) { 22444a17fc3aSIlpo Järvinen if (!tcp_can_collapse(sk, skb)) 22454a17fc3aSIlpo Järvinen break; 22464a17fc3aSIlpo Järvinen 22474a17fc3aSIlpo Järvinen space -= skb->len; 22484a17fc3aSIlpo Järvinen 22494a17fc3aSIlpo Järvinen if (first) { 2250a2a385d6SEric Dumazet first = false; 22514a17fc3aSIlpo Järvinen continue; 22524a17fc3aSIlpo Järvinen } 22534a17fc3aSIlpo Järvinen 22544a17fc3aSIlpo Järvinen if (space < 0) 22554a17fc3aSIlpo Järvinen break; 22564a17fc3aSIlpo Järvinen /* Punt if not enough space exists in the first SKB for 22574a17fc3aSIlpo Järvinen * the data in the second 22584a17fc3aSIlpo Järvinen */ 2259a21d4572SEric Dumazet if (skb->len > skb_availroom(to)) 22604a17fc3aSIlpo Järvinen break; 22614a17fc3aSIlpo Järvinen 22624a17fc3aSIlpo Järvinen if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 22634a17fc3aSIlpo Järvinen break; 22644a17fc3aSIlpo Järvinen 22654a17fc3aSIlpo Järvinen tcp_collapse_retrans(sk, to); 22664a17fc3aSIlpo Järvinen } 22674a17fc3aSIlpo Järvinen } 22684a17fc3aSIlpo Järvinen 22691da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue 22701da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an 22711da177e4SLinus Torvalds * error occurred which prevented the send. 22721da177e4SLinus Torvalds */ 227393b174adSYuchung Cheng int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 22741da177e4SLinus Torvalds { 22751da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 22765d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 22777d227cd2SSridhar Samudrala unsigned int cur_mss; 22781da177e4SLinus Torvalds 22795d424d5aSJohn Heffner /* Inconclusive MTU probe */ 22805d424d5aSJohn Heffner if (icsk->icsk_mtup.probe_size) { 22815d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 22825d424d5aSJohn Heffner } 22835d424d5aSJohn Heffner 22841da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible 2285caa20d9aSStephen Hemminger * copying overhead: fragmentation, tunneling, mangling etc.
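 *
 * Worked example (assumed numbers): with sk_wmem_queued at 64KB and a
 * larger sk_sndbuf, the check below allows up to 64KB + 16KB = 80KB of
 * sk_wmem_alloc before the retransmit is deferred with -EAGAIN.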
22861da177e4SLinus Torvalds */ 22871da177e4SLinus Torvalds if (atomic_read(&sk->sk_wmem_alloc) > 22881da177e4SLinus Torvalds min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 22891da177e4SLinus Torvalds return -EAGAIN; 22901da177e4SLinus Torvalds 22911da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 22921da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 22931da177e4SLinus Torvalds BUG(); 22941da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 22951da177e4SLinus Torvalds return -ENOMEM; 22961da177e4SLinus Torvalds } 22971da177e4SLinus Torvalds 22987d227cd2SSridhar Samudrala if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 22997d227cd2SSridhar Samudrala return -EHOSTUNREACH; /* Routing failure or similar. */ 23007d227cd2SSridhar Samudrala 23010c54b85fSIlpo Järvinen cur_mss = tcp_current_mss(sk); 23027d227cd2SSridhar Samudrala 23031da177e4SLinus Torvalds /* If receiver has shrunk his window, and skb is out of 23041da177e4SLinus Torvalds * new window, do not retransmit it. The exception is the 23051da177e4SLinus Torvalds * case, when window is shrunk to zero. In this case 23061da177e4SLinus Torvalds * our retransmit serves as a zero window probe. 23071da177e4SLinus Torvalds */ 23089d4fb27dSJoe Perches if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 23099d4fb27dSJoe Perches TCP_SKB_CB(skb)->seq != tp->snd_una) 23101da177e4SLinus Torvalds return -EAGAIN; 23111da177e4SLinus Torvalds 23121da177e4SLinus Torvalds if (skb->len > cur_mss) { 2313846998aeSDavid S. Miller if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 23141da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */ 231502276f3cSIlpo Järvinen } else { 23169eb9362eSIlpo Järvinen int oldpcount = tcp_skb_pcount(skb); 23179eb9362eSIlpo Järvinen 23189eb9362eSIlpo Järvinen if (unlikely(oldpcount > 1)) { 231902276f3cSIlpo Järvinen tcp_init_tso_segs(sk, skb, cur_mss); 23209eb9362eSIlpo Järvinen tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 23219eb9362eSIlpo Järvinen } 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds 23241da177e4SLinus Torvalds tcp_retrans_try_collapse(sk, skb, cur_mss); 23251da177e4SLinus Torvalds 23261da177e4SLinus Torvalds /* Some Solaris stacks overoptimize and ignore the FIN on a 23271da177e4SLinus Torvalds * retransmit when old data is attached. So strip it off 23281da177e4SLinus Torvalds * since it is cheap to do so and saves bytes on the network. 23291da177e4SLinus Torvalds */ 23301da177e4SLinus Torvalds if (skb->len > 0 && 23314de075e0SEric Dumazet (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 23321da177e4SLinus Torvalds tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 23331da177e4SLinus Torvalds if (!pskb_trim(skb, 0)) { 2334e870a8efSIlpo Järvinen /* Reuse, even though it does some unnecessary work */ 2335e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 23364de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags); 23371da177e4SLinus Torvalds skb->ip_summed = CHECKSUM_NONE; 23381da177e4SLinus Torvalds } 23391da177e4SLinus Torvalds } 23401da177e4SLinus Torvalds 23411da177e4SLinus Torvalds /* Make a copy, if the first transmission SKB clone we made 23421da177e4SLinus Torvalds * is still in somebody's hands, else make a clone. 
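 * (tcp_transmit_skb() with clone_it == 1 makes that clone; the
 * misaligned-data branch below copies instead, since some
 * architectures cannot cheaply checksum or DMA from an skb->data
 * that is not word-aligned.)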
23431da177e4SLinus Torvalds */ 23441da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 23451da177e4SLinus Torvalds 2346117632e6SEric Dumazet /* make sure skb->data is aligned on arches that require it */ 2347117632e6SEric Dumazet if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { 2348117632e6SEric Dumazet struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, 2349117632e6SEric Dumazet GFP_ATOMIC); 235093b174adSYuchung Cheng return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2351117632e6SEric Dumazet -ENOBUFS; 2352117632e6SEric Dumazet } else { 235393b174adSYuchung Cheng return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2354117632e6SEric Dumazet } 235593b174adSYuchung Cheng } 235693b174adSYuchung Cheng 235793b174adSYuchung Cheng int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 235893b174adSYuchung Cheng { 235993b174adSYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 236093b174adSYuchung Cheng int err = __tcp_retransmit_skb(sk, skb); 23611da177e4SLinus Torvalds 23621da177e4SLinus Torvalds if (err == 0) { 23631da177e4SLinus Torvalds /* Update global TCP statistics. */ 236481cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 23651da177e4SLinus Torvalds 23661da177e4SLinus Torvalds tp->total_retrans++; 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 23691da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2370e87cc472SJoe Perches net_dbg_ratelimited("retrans_out leaked\n"); 23711da177e4SLinus Torvalds } 23721da177e4SLinus Torvalds #endif 2373b08d6cb2SIlpo Järvinen if (!tp->retrans_out) 2374b08d6cb2SIlpo Järvinen tp->lost_retrans_low = tp->snd_nxt; 23751da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 23761da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 23771da177e4SLinus Torvalds 23781da177e4SLinus Torvalds /* Save stamp of the first retransmit. */ 23791da177e4SLinus Torvalds if (!tp->retrans_stamp) 23801da177e4SLinus Torvalds tp->retrans_stamp = TCP_SKB_CB(skb)->when; 23811da177e4SLinus Torvalds 2382c24f691bSYuchung Cheng tp->undo_retrans += tcp_skb_pcount(skb); 23831da177e4SLinus Torvalds 23841da177e4SLinus Torvalds /* snd_nxt is stored to detect loss of retransmitted segment, 23851da177e4SLinus Torvalds * see tcp_input.c tcp_sacktag_write_queue(). 23861da177e4SLinus Torvalds */ 23871da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 23881da177e4SLinus Torvalds } 23891da177e4SLinus Torvalds return err; 23901da177e4SLinus Torvalds } 23911da177e4SLinus Torvalds 239267edfef7SAndi Kleen /* Check whether forward retransmits are possible in the current 239367edfef7SAndi Kleen * window/congestion state. 239467edfef7SAndi Kleen */ 2395a2a385d6SEric Dumazet static bool tcp_can_forward_retransmit(struct sock *sk) 2396b5afe7bcSIlpo Järvinen { 2397b5afe7bcSIlpo Järvinen const struct inet_connection_sock *icsk = inet_csk(sk); 2398cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 2399b5afe7bcSIlpo Järvinen 2400b5afe7bcSIlpo Järvinen /* Forward retransmissions are possible only during Recovery. */ 2401b5afe7bcSIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Recovery) 2402a2a385d6SEric Dumazet return false; 2403b5afe7bcSIlpo Järvinen 2404b5afe7bcSIlpo Järvinen /* No forward retransmissions in Reno are possible.
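 * Without SACK there is no scoreboard recording which later segments
 * reached the receiver, so skipping ahead of the highest retransmitted
 * sequence would be pure guesswork.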
*/ 2405b5afe7bcSIlpo Järvinen if (tcp_is_reno(tp)) 2406a2a385d6SEric Dumazet return false; 2407b5afe7bcSIlpo Järvinen 2408b5afe7bcSIlpo Järvinen /* Yeah, we have to make a difficult choice between forward transmission 2409b5afe7bcSIlpo Järvinen * and retransmission... Both ways have their merits... 2410b5afe7bcSIlpo Järvinen * 2411b5afe7bcSIlpo Järvinen * For now we do not retransmit anything, while we have some new 2412b5afe7bcSIlpo Järvinen * segments to send. In the other cases, follow rule 3 for 2413b5afe7bcSIlpo Järvinen * NextSeg() specified in RFC3517. 2414b5afe7bcSIlpo Järvinen */ 2415b5afe7bcSIlpo Järvinen 2416b5afe7bcSIlpo Järvinen if (tcp_may_send_now(sk)) 2417a2a385d6SEric Dumazet return false; 2418b5afe7bcSIlpo Järvinen 2419a2a385d6SEric Dumazet return true; 2420b5afe7bcSIlpo Järvinen } 2421b5afe7bcSIlpo Järvinen 24221da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially 24231da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue 24241da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either 24251da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached. 24261da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout 24271da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again. 24281da177e4SLinus Torvalds * If so, we use it to avoid unnecessary retransmissions. 24291da177e4SLinus Torvalds */ 24301da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk) 24311da177e4SLinus Torvalds { 24326687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 24331da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 24341da177e4SLinus Torvalds struct sk_buff *skb; 24350e1c54c2SIlpo Järvinen struct sk_buff *hole = NULL; 2436618d9f25SIlpo Järvinen u32 last_lost; 243761eb55f4SIlpo Järvinen int mib_idx; 24380e1c54c2SIlpo Järvinen int fwd_rexmitting = 0; 24396a438bbeSStephen Hemminger 244045e77d31SIlpo Järvinen if (!tp->packets_out) 244145e77d31SIlpo Järvinen return; 244245e77d31SIlpo Järvinen 244308ebd172SIlpo Järvinen if (!tp->lost_out) 244408ebd172SIlpo Järvinen tp->retransmit_high = tp->snd_una; 244508ebd172SIlpo Järvinen 2446618d9f25SIlpo Järvinen if (tp->retransmit_skb_hint) { 24476a438bbeSStephen Hemminger skb = tp->retransmit_skb_hint; 2448618d9f25SIlpo Järvinen last_lost = TCP_SKB_CB(skb)->end_seq; 2449618d9f25SIlpo Järvinen if (after(last_lost, tp->retransmit_high)) 2450618d9f25SIlpo Järvinen last_lost = tp->retransmit_high; 2451618d9f25SIlpo Järvinen } else { 2452fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 2453618d9f25SIlpo Järvinen last_lost = tp->snd_una; 2454618d9f25SIlpo Järvinen } 24551da177e4SLinus Torvalds 2456fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 24571da177e4SLinus Torvalds __u8 sacked = TCP_SKB_CB(skb)->sacked; 24581da177e4SLinus Torvalds 2459fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 2460fe067e8aSDavid S. Miller break; 24616a438bbeSStephen Hemminger /* we could do better than to assign each time */ 24620e1c54c2SIlpo Järvinen if (hole == NULL) 24636a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb; 24646a438bbeSStephen Hemminger 24651da177e4SLinus Torvalds /* Assume this retransmit will generate 24661da177e4SLinus Torvalds * only one packet for congestion window 24671da177e4SLinus Torvalds * calculation purposes.
This works because 24681da177e4SLinus Torvalds * tcp_retransmit_skb() will chop up the 24691da177e4SLinus Torvalds * packet to be MSS sized and all the 24701da177e4SLinus Torvalds * packet counting works out. 24711da177e4SLinus Torvalds */ 24721da177e4SLinus Torvalds if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 24731da177e4SLinus Torvalds return; 24740e1c54c2SIlpo Järvinen 24750e1c54c2SIlpo Järvinen if (fwd_rexmitting) { 24760e1c54c2SIlpo Järvinen begin_fwd: 24770e1c54c2SIlpo Järvinen if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2478006f582cSIlpo Järvinen break; 24790e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 24800e1c54c2SIlpo Järvinen 24810e1c54c2SIlpo Järvinen } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2482618d9f25SIlpo Järvinen tp->retransmit_high = last_lost; 24830e1c54c2SIlpo Järvinen if (!tcp_can_forward_retransmit(sk)) 24840e1c54c2SIlpo Järvinen break; 24850e1c54c2SIlpo Järvinen /* Backtrack if necessary to non-L'ed skb */ 24860e1c54c2SIlpo Järvinen if (hole != NULL) { 24870e1c54c2SIlpo Järvinen skb = hole; 24880e1c54c2SIlpo Järvinen hole = NULL; 24890e1c54c2SIlpo Järvinen } 24900e1c54c2SIlpo Järvinen fwd_rexmitting = 1; 24910e1c54c2SIlpo Järvinen goto begin_fwd; 24920e1c54c2SIlpo Järvinen 24930e1c54c2SIlpo Järvinen } else if (!(sacked & TCPCB_LOST)) { 2494ac11ba75SIlpo Järvinen if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 24950e1c54c2SIlpo Järvinen hole = skb; 249661eb55f4SIlpo Järvinen continue; 24971da177e4SLinus Torvalds 24980e1c54c2SIlpo Järvinen } else { 2499618d9f25SIlpo Järvinen last_lost = TCP_SKB_CB(skb)->end_seq; 25000e1c54c2SIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Loss) 25010e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFASTRETRANS; 25020e1c54c2SIlpo Järvinen else 25030e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 25040e1c54c2SIlpo Järvinen } 25050e1c54c2SIlpo Järvinen 25060e1c54c2SIlpo Järvinen if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 250761eb55f4SIlpo Järvinen continue; 250840b215e5SPavel Emelyanov 250909e9b813SEric Dumazet if (tcp_retransmit_skb(sk, skb)) { 251009e9b813SEric Dumazet NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 25111da177e4SLinus Torvalds return; 251209e9b813SEric Dumazet } 2513de0744afSPavel Emelyanov NET_INC_STATS_BH(sock_net(sk), mib_idx); 25141da177e4SLinus Torvalds 2515684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 2516a262f0cdSNandita Dukkipati tp->prr_out += tcp_skb_pcount(skb); 2517a262f0cdSNandita Dukkipati 2518fe067e8aSDavid S. Miller if (skb == tcp_write_queue_head(sk)) 2519463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 25203f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, 25213f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 25221da177e4SLinus Torvalds } 25231da177e4SLinus Torvalds } 25241da177e4SLinus Torvalds 25251da177e4SLinus Torvalds /* Send a fin. The caller locks the socket for us. This cannot be 25261da177e4SLinus Torvalds * allowed to fail queueing a FIN frame under any circumstances. 25271da177e4SLinus Torvalds */ 25281da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 25291da177e4SLinus Torvalds { 25301da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2531fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_write_queue_tail(sk); 25321da177e4SLinus Torvalds int mss_now; 25331da177e4SLinus Torvalds 25341da177e4SLinus Torvalds /* Optimization, tack on the FIN if we have a queue of 25351da177e4SLinus Torvalds * unsent frames. 
But be careful about outgoing SACKS 25361da177e4SLinus Torvalds * and IP options. 25371da177e4SLinus Torvalds */ 25380c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk); 25391da177e4SLinus Torvalds 2540fe067e8aSDavid S. Miller if (tcp_send_head(sk) != NULL) { 25414de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 25421da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq++; 25431da177e4SLinus Torvalds tp->write_seq++; 25441da177e4SLinus Torvalds } else { 25451da177e4SLinus Torvalds /* Socket is locked, keep trying until memory is available. */ 25461da177e4SLinus Torvalds for (;;) { 2547aa133076SWu Fengguang skb = alloc_skb_fclone(MAX_TCP_HEADER, 2548aa133076SWu Fengguang sk->sk_allocation); 25491da177e4SLinus Torvalds if (skb) 25501da177e4SLinus Torvalds break; 25511da177e4SLinus Torvalds yield(); 25521da177e4SLinus Torvalds } 25531da177e4SLinus Torvalds 25541da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 25551da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 25561da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2557e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tp->write_seq, 2558a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_FIN); 25591da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 25601da177e4SLinus Torvalds } 25619e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 25621da177e4SLinus Torvalds } 25631da177e4SLinus Torvalds 25641da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 25651da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 25661da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 256765bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 25681da177e4SLinus Torvalds */ 2569dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 25701da177e4SLinus Torvalds { 25711da177e4SLinus Torvalds struct sk_buff *skb; 25721da177e4SLinus Torvalds 25731da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 25741da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 25751da177e4SLinus Torvalds if (!skb) { 25764e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 25771da177e4SLinus Torvalds return; 25781da177e4SLinus Torvalds } 25791da177e4SLinus Torvalds 25801da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 25811da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 2582e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2583a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_RST); 25841da177e4SLinus Torvalds /* Send it off. */ 25851da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2586dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 25874e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 258826af65cbSSridhar Samudrala 258981cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 25901da177e4SLinus Torvalds } 25911da177e4SLinus Torvalds 259267edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment. 259367edfef7SAndi Kleen * WARNING: This routine must only be called when we have already sent 25941da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 25951da177e4SLinus Torvalds * to get called. 
If this assumption fails then the initial rcv_wnd 25961da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 25971da177e4SLinus Torvalds */ 25981da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 25991da177e4SLinus Torvalds { 26001da177e4SLinus Torvalds struct sk_buff *skb; 26011da177e4SLinus Torvalds 2602fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 26034de075e0SEric Dumazet if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 260491df42beSJoe Perches pr_debug("%s: wrong queue state\n", __func__); 26051da177e4SLinus Torvalds return -EFAULT; 26061da177e4SLinus Torvalds } 26074de075e0SEric Dumazet if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 26081da177e4SLinus Torvalds if (skb_cloned(skb)) { 26091da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 26101da177e4SLinus Torvalds if (nskb == NULL) 26111da177e4SLinus Torvalds return -ENOMEM; 2612fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 26131da177e4SLinus Torvalds skb_header_release(nskb); 2614fe067e8aSDavid S. Miller __tcp_add_write_queue_head(sk, nskb); 26153ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 26163ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 26173ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 26181da177e4SLinus Torvalds skb = nskb; 26191da177e4SLinus Torvalds } 26201da177e4SLinus Torvalds 26214de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 26221da177e4SLinus Torvalds TCP_ECN_send_synack(tcp_sk(sk), skb); 26231da177e4SLinus Torvalds } 26241da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 2625dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 26261da177e4SLinus Torvalds } 26271da177e4SLinus Torvalds 26284aea39c1SEric Dumazet /** 26294aea39c1SEric Dumazet * tcp_make_synack - Prepare a SYN-ACK. 26304aea39c1SEric Dumazet * sk: listener socket 26314aea39c1SEric Dumazet * dst: dst entry attached to the SYNACK 26324aea39c1SEric Dumazet * req: request_sock pointer 26334aea39c1SEric Dumazet * 26344aea39c1SEric Dumazet * Allocate one skb and build a SYNACK packet. 26354aea39c1SEric Dumazet * @dst is consumed : Caller should not use it again. 26364aea39c1SEric Dumazet */ 26371da177e4SLinus Torvalds struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2638e6b4d113SWilliam Allen Simpson struct request_sock *req, 26398336886fSJerry Chu struct tcp_fastopen_cookie *foc) 26401da177e4SLinus Torvalds { 2641bd0388aeSWilliam Allen Simpson struct tcp_out_options opts; 26422e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 26431da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 26441da177e4SLinus Torvalds struct tcphdr *th; 26451da177e4SLinus Torvalds struct sk_buff *skb; 2646cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key *md5; 2647bd0388aeSWilliam Allen Simpson int tcp_header_size; 2648f5fff5dcSTom Quetchenbach int mss; 26491da177e4SLinus Torvalds 26501a2c6181SChristoph Paasch skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 26514aea39c1SEric Dumazet if (unlikely(!skb)) { 26524aea39c1SEric Dumazet dst_release(dst); 26531da177e4SLinus Torvalds return NULL; 26544aea39c1SEric Dumazet } 26551da177e4SLinus Torvalds /* Reserve space for headers. */ 26561da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 26571da177e4SLinus Torvalds 26584aea39c1SEric Dumazet skb_dst_set(skb, dst); 26591da177e4SLinus Torvalds 26600dbaee3bSDavid S. 
Miller mss = dst_metric_advmss(dst); 2661f5fff5dcSTom Quetchenbach if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2662f5fff5dcSTom Quetchenbach mss = tp->rx_opt.user_mss; 2663f5fff5dcSTom Quetchenbach 266433ad798cSAdam Langley if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 266533ad798cSAdam Langley __u8 rcv_wscale; 266633ad798cSAdam Langley /* Set this up on the first call only */ 266733ad798cSAdam Langley req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2668e88c64f0SHagen Paul Pfeifer 2669e88c64f0SHagen Paul Pfeifer /* limit the window selection if the user enforces a smaller rx buffer */ 2670e88c64f0SHagen Paul Pfeifer if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2671e88c64f0SHagen Paul Pfeifer (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) 2672e88c64f0SHagen Paul Pfeifer req->window_clamp = tcp_full_space(sk); 2673e88c64f0SHagen Paul Pfeifer 267433ad798cSAdam Langley /* tcp_full_space because it is guaranteed to be the first packet */ 267533ad798cSAdam Langley tcp_select_initial_window(tcp_full_space(sk), 2676f5fff5dcSTom Quetchenbach mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 267733ad798cSAdam Langley &req->rcv_wnd, 267833ad798cSAdam Langley &req->window_clamp, 267933ad798cSAdam Langley ireq->wscale_ok, 268031d12926Slaurent chavey &rcv_wscale, 268131d12926Slaurent chavey dst_metric(dst, RTAX_INITRWND)); 268233ad798cSAdam Langley ireq->rcv_wscale = rcv_wscale; 268333ad798cSAdam Langley } 2684cfb6eeb4SYOSHIFUJI Hideaki 268533ad798cSAdam Langley memset(&opts, 0, sizeof(opts)); 26868b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES 26878b5f12d0SFlorian Westphal if (unlikely(req->cookie_ts)) 26888b5f12d0SFlorian Westphal TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 26898b5f12d0SFlorian Westphal else 26908b5f12d0SFlorian Westphal #endif 269133ad798cSAdam Langley TCP_SKB_CB(skb)->when = tcp_time_stamp; 26921a2c6181SChristoph Paasch tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, 26931a2c6181SChristoph Paasch foc) + sizeof(*th); 269433ad798cSAdam Langley 2695aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size); 2696aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb); 26971da177e4SLinus Torvalds 2698aa8223c7SArnaldo Carvalho de Melo th = tcp_hdr(skb); 26991da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 27001da177e4SLinus Torvalds th->syn = 1; 27011da177e4SLinus Torvalds th->ack = 1; 27021da177e4SLinus Torvalds TCP_ECN_make_synack(req, th); 2703a3116ac5SKOVACS Krisztian th->source = ireq->loc_port; 27042e6599cbSArnaldo Carvalho de Melo th->dest = ireq->rmt_port; 2705e870a8efSIlpo Järvinen /* Setting of flags is superfluous here for callers (and ECE is 2706e870a8efSIlpo Järvinen * not even correctly set) 2707e870a8efSIlpo Järvinen */ 2708e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2709a3433f35SChangli Gao TCPHDR_SYN | TCPHDR_ACK); 27104957faadSWilliam Allen Simpson 27111da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq); 27128336886fSJerry Chu /* XXX data is queued and acked as is. No buffer/window check */ 27138336886fSJerry Chu th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 27141da177e4SLinus Torvalds 27151da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled.
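 * For example, even if req->rcv_wnd is 256KB and a wscale of 7 was
 * granted, the SYN-ACK itself advertises min(req->rcv_wnd, 65535)
 * below; the shift only applies to segments sent after the handshake
 * completes.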
*/ 2716600ff0c2SIlpo Järvinen th->window = htons(min(req->rcv_wnd, 65535U)); 2717bd0388aeSWilliam Allen Simpson tcp_options_write((__be32 *)(th + 1), tp, &opts); 27181da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 2719aa2ea058STom Herbert TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); 2720cfb6eeb4SYOSHIFUJI Hideaki 2721cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2722cfb6eeb4SYOSHIFUJI Hideaki /* Okay, we have all we need - do the md5 hash if needed */ 2723cfb6eeb4SYOSHIFUJI Hideaki if (md5) { 2724bd0388aeSWilliam Allen Simpson tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 272549a72dfbSAdam Langley md5, NULL, req, skb); 2726cfb6eeb4SYOSHIFUJI Hideaki } 2727cfb6eeb4SYOSHIFUJI Hideaki #endif 2728cfb6eeb4SYOSHIFUJI Hideaki 27291da177e4SLinus Torvalds return skb; 27301da177e4SLinus Torvalds } 27314bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack); 27321da177e4SLinus Torvalds 273367edfef7SAndi Kleen /* Do all connect socket setups that can be done AF-independent. */ 2734370816aeSPavel Emelyanov void tcp_connect_init(struct sock *sk) 27351da177e4SLinus Torvalds { 2736cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 27371da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 27381da177e4SLinus Torvalds __u8 rcv_wscale; 27391da177e4SLinus Torvalds 27401da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end. 27411da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 27421da177e4SLinus Torvalds */ 27431da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr) + 2744bb5b7c11SDavid S. Miller (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 27451da177e4SLinus Torvalds 2746cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2747cfb6eeb4SYOSHIFUJI Hideaki if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2748cfb6eeb4SYOSHIFUJI Hideaki tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2749cfb6eeb4SYOSHIFUJI Hideaki #endif 2750cfb6eeb4SYOSHIFUJI Hideaki 27511da177e4SLinus Torvalds /* If user gave his TCP_MAXSEG, record it to clamp */ 27521da177e4SLinus Torvalds if (tp->rx_opt.user_mss) 27531da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 27541da177e4SLinus Torvalds tp->max_window = 0; 27555d424d5aSJohn Heffner tcp_mtup_init(sk); 27561da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst)); 27571da177e4SLinus Torvalds 27581da177e4SLinus Torvalds if (!tp->window_clamp) 27591da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 27600dbaee3bSDavid S. Miller tp->advmss = dst_metric_advmss(dst); 2761f5fff5dcSTom Quetchenbach if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2762f5fff5dcSTom Quetchenbach tp->advmss = tp->rx_opt.user_mss; 2763f5fff5dcSTom Quetchenbach 27641da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 27651da177e4SLinus Torvalds 2766e88c64f0SHagen Paul Pfeifer /* limit the window selection if the user enforces a smaller rx buffer */ 2767e88c64f0SHagen Paul Pfeifer if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2768e88c64f0SHagen Paul Pfeifer (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 2769e88c64f0SHagen Paul Pfeifer tp->window_clamp = tcp_full_space(sk); 2770e88c64f0SHagen Paul Pfeifer 27711da177e4SLinus Torvalds tcp_select_initial_window(tcp_full_space(sk), 27721da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 27731da177e4SLinus Torvalds &tp->rcv_wnd, 27741da177e4SLinus Torvalds &tp->window_clamp, 2775bb5b7c11SDavid S.
Miller sysctl_tcp_window_scaling, 277631d12926Slaurent chavey &rcv_wscale, 277731d12926Slaurent chavey dst_metric(dst, RTAX_INITRWND)); 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds tp->rx_opt.rcv_wscale = rcv_wscale; 27801da177e4SLinus Torvalds tp->rcv_ssthresh = tp->rcv_wnd; 27811da177e4SLinus Torvalds 27821da177e4SLinus Torvalds sk->sk_err = 0; 27831da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 27841da177e4SLinus Torvalds tp->snd_wnd = 0; 2785ee7537b6SHantzis Fotis tcp_init_wl(tp, 0); 27861da177e4SLinus Torvalds tp->snd_una = tp->write_seq; 27871da177e4SLinus Torvalds tp->snd_sml = tp->write_seq; 278833f5f57eSIlpo Järvinen tp->snd_up = tp->write_seq; 2789370816aeSPavel Emelyanov tp->snd_nxt = tp->write_seq; 2790ee995283SPavel Emelyanov 2791ee995283SPavel Emelyanov if (likely(!tp->repair)) 27921da177e4SLinus Torvalds tp->rcv_nxt = 0; 2793ee995283SPavel Emelyanov tp->rcv_wup = tp->rcv_nxt; 2794ee995283SPavel Emelyanov tp->copied_seq = tp->rcv_nxt; 27951da177e4SLinus Torvalds 2796463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2797463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 27981da177e4SLinus Torvalds tcp_clear_retrans(tp); 27991da177e4SLinus Torvalds } 28001da177e4SLinus Torvalds 2801783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 2802783237e8SYuchung Cheng { 2803783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 2804783237e8SYuchung Cheng struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 2805783237e8SYuchung Cheng 2806783237e8SYuchung Cheng tcb->end_seq += skb->len; 2807783237e8SYuchung Cheng skb_header_release(skb); 2808783237e8SYuchung Cheng __tcp_add_write_queue_tail(sk, skb); 2809783237e8SYuchung Cheng sk->sk_wmem_queued += skb->truesize; 2810783237e8SYuchung Cheng sk_mem_charge(sk, skb->truesize); 2811783237e8SYuchung Cheng tp->write_seq = tcb->end_seq; 2812783237e8SYuchung Cheng tp->packets_out += tcp_skb_pcount(skb); 2813783237e8SYuchung Cheng } 2814783237e8SYuchung Cheng 2815783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However, 2816783237e8SYuchung Cheng * queue a data-only packet after the regular SYN, such that regular SYNs 2817783237e8SYuchung Cheng * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges 2818783237e8SYuchung Cheng * only the SYN sequence, the data are retransmitted in the first ACK. 2819783237e8SYuchung Cheng * If the cookie is not cached or another error occurs, it falls back to sending a 2820783237e8SYuchung Cheng * regular SYN with the Fast Open cookie request option.
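 * Rough decision flow of the function below: a usable cached cookie
 * leads to SYN+data in one segment; no cookie or recurring SYN losses
 * lead to a plain SYN that merely requests a cookie, with the data
 * following after the handshake as usual.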
2821783237e8SYuchung Cheng */ 2822783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 2823783237e8SYuchung Cheng { 2824783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 2825783237e8SYuchung Cheng struct tcp_fastopen_request *fo = tp->fastopen_req; 2826aab48743SYuchung Cheng int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; 2827783237e8SYuchung Cheng struct sk_buff *syn_data = NULL, *data; 2828aab48743SYuchung Cheng unsigned long last_syn_loss = 0; 2829783237e8SYuchung Cheng 283067da22d2SYuchung Cheng tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 2831aab48743SYuchung Cheng tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, 2832aab48743SYuchung Cheng &syn_loss, &last_syn_loss); 2833aab48743SYuchung Cheng /* Recurring FO SYN losses: revert to regular handshake temporarily */ 2834aab48743SYuchung Cheng if (syn_loss > 1 && 2835aab48743SYuchung Cheng time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { 2836aab48743SYuchung Cheng fo->cookie.len = -1; 2837aab48743SYuchung Cheng goto fallback; 2838aab48743SYuchung Cheng } 2839aab48743SYuchung Cheng 284067da22d2SYuchung Cheng if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) 284167da22d2SYuchung Cheng fo->cookie.len = -1; 284267da22d2SYuchung Cheng else if (fo->cookie.len <= 0) 2843783237e8SYuchung Cheng goto fallback; 2844783237e8SYuchung Cheng 2845783237e8SYuchung Cheng /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 2846783237e8SYuchung Cheng * user-MSS. Reserve maximum option space for middleboxes that add 2847783237e8SYuchung Cheng * private TCP options. The cost is reduced data space in SYN :( 2848783237e8SYuchung Cheng */ 2849783237e8SYuchung Cheng if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) 2850783237e8SYuchung Cheng tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 28511b63edd6SYuchung Cheng space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - 2852783237e8SYuchung Cheng MAX_TCP_OPTION_SPACE; 2853783237e8SYuchung Cheng 2854783237e8SYuchung Cheng syn_data = skb_copy_expand(syn, skb_headroom(syn), space, 2855783237e8SYuchung Cheng sk->sk_allocation); 2856783237e8SYuchung Cheng if (syn_data == NULL) 2857783237e8SYuchung Cheng goto fallback; 2858783237e8SYuchung Cheng 2859783237e8SYuchung Cheng for (i = 0; i < iovlen && syn_data->len < space; ++i) { 2860783237e8SYuchung Cheng struct iovec *iov = &fo->data->msg_iov[i]; 2861783237e8SYuchung Cheng unsigned char __user *from = iov->iov_base; 2862783237e8SYuchung Cheng int len = iov->iov_len; 2863783237e8SYuchung Cheng 2864783237e8SYuchung Cheng if (syn_data->len + len > space) 2865783237e8SYuchung Cheng len = space - syn_data->len; 2866783237e8SYuchung Cheng else if (i + 1 == iovlen) 2867783237e8SYuchung Cheng /* No more data pending in inet_wait_for_connect() */ 2868783237e8SYuchung Cheng fo->data = NULL; 2869783237e8SYuchung Cheng 2870783237e8SYuchung Cheng if (skb_add_data(syn_data, from, len)) 2871783237e8SYuchung Cheng goto fallback; 2872783237e8SYuchung Cheng } 2873783237e8SYuchung Cheng 2874783237e8SYuchung Cheng /* Queue a data-only packet after the regular SYN for retransmission */ 2875783237e8SYuchung Cheng data = pskb_copy(syn_data, sk->sk_allocation); 2876783237e8SYuchung Cheng if (data == NULL) 2877783237e8SYuchung Cheng goto fallback; 2878783237e8SYuchung Cheng TCP_SKB_CB(data)->seq++; 2879783237e8SYuchung Cheng TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; 2880783237e8SYuchung Cheng TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); 
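	/* At this point "data" is a pskb_copy() of the SYN+data skb with
	 * the SYN flag cleared and seq advanced past the SYN, so the write
	 * queue ends up holding an ordinary data segment that the regular
	 * retransmit machinery can resend if the SYN+data segment is lost
	 * or only the SYN sequence gets acknowledged.
	 */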
2881783237e8SYuchung Cheng tcp_connect_queue_skb(sk, data); 2882783237e8SYuchung Cheng fo->copied = data->len; 2883783237e8SYuchung Cheng 2884783237e8SYuchung Cheng if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { 288567da22d2SYuchung Cheng tp->syn_data = (fo->copied > 0); 2886783237e8SYuchung Cheng NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); 2887783237e8SYuchung Cheng goto done; 2888783237e8SYuchung Cheng } 2889783237e8SYuchung Cheng syn_data = NULL; 2890783237e8SYuchung Cheng 2891783237e8SYuchung Cheng fallback: 2892783237e8SYuchung Cheng /* Send a regular SYN with Fast Open cookie request option */ 2893783237e8SYuchung Cheng if (fo->cookie.len > 0) 2894783237e8SYuchung Cheng fo->cookie.len = 0; 2895783237e8SYuchung Cheng err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 2896783237e8SYuchung Cheng if (err) 2897783237e8SYuchung Cheng tp->syn_fastopen = 0; 2898783237e8SYuchung Cheng kfree_skb(syn_data); 2899783237e8SYuchung Cheng done: 2900783237e8SYuchung Cheng fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 2901783237e8SYuchung Cheng return err; 2902783237e8SYuchung Cheng } 2903783237e8SYuchung Cheng 290467edfef7SAndi Kleen /* Build a SYN and send it off. */ 29051da177e4SLinus Torvalds int tcp_connect(struct sock *sk) 29061da177e4SLinus Torvalds { 29071da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29081da177e4SLinus Torvalds struct sk_buff *buff; 2909ee586811SEric Paris int err; 29101da177e4SLinus Torvalds 29111da177e4SLinus Torvalds tcp_connect_init(sk); 29121da177e4SLinus Torvalds 29132b916477SAndrey Vagin if (unlikely(tp->repair)) { 29142b916477SAndrey Vagin tcp_finish_connect(sk, NULL); 29152b916477SAndrey Vagin return 0; 29162b916477SAndrey Vagin } 29172b916477SAndrey Vagin 2918d179cd12SDavid S. Miller buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 29191da177e4SLinus Torvalds if (unlikely(buff == NULL)) 29201da177e4SLinus Torvalds return -ENOBUFS; 29211da177e4SLinus Torvalds 29221da177e4SLinus Torvalds /* Reserve space for headers. */ 29231da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 29241da177e4SLinus Torvalds 2925a3433f35SChangli Gao tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2926783237e8SYuchung Cheng tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; 2927783237e8SYuchung Cheng tcp_connect_queue_skb(sk, buff); 2928e870a8efSIlpo Järvinen TCP_ECN_send_syn(sk, buff); 29291da177e4SLinus Torvalds 2930783237e8SYuchung Cheng /* Send off SYN; include data in Fast Open. */ 2931783237e8SYuchung Cheng err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 2932783237e8SYuchung Cheng tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2933ee586811SEric Paris if (err == -ECONNREFUSED) 2934ee586811SEric Paris return err; 2935bd37a088SWei Yongjun 2936bd37a088SWei Yongjun /* We change tp->snd_nxt after the tcp_transmit_skb() call 2937bd37a088SWei Yongjun * in order to make this packet get counted in tcpOutSegs. 2938bd37a088SWei Yongjun */ 2939bd37a088SWei Yongjun tp->snd_nxt = tp->write_seq; 2940bd37a088SWei Yongjun tp->pushed_seq = tp->write_seq; 294181cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 29421da177e4SLinus Torvalds 29431da177e4SLinus Torvalds /* Timer for repeating the SYN until an answer. 
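 * The timer is armed with icsk_rto (TCP_TIMEOUT_INIT for a fresh
 * connection); each expiry retransmits the SYN and doubles the RTO,
 * capped at TCP_RTO_MAX, until the SYN-ACK arrives.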
*/ 29443f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 29453f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 29461da177e4SLinus Torvalds return 0; 29471da177e4SLinus Torvalds } 29484bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect); 29491da177e4SLinus Torvalds 29501da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking 29511da177e4SLinus Torvalds * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 29521da177e4SLinus Torvalds * for details. 29531da177e4SLinus Torvalds */ 29541da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk) 29551da177e4SLinus Torvalds { 2956463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 2957463c84b9SArnaldo Carvalho de Melo int ato = icsk->icsk_ack.ato; 29581da177e4SLinus Torvalds unsigned long timeout; 29591da177e4SLinus Torvalds 29601da177e4SLinus Torvalds if (ato > TCP_DELACK_MIN) { 2961463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 29621da177e4SLinus Torvalds int max_ato = HZ / 2; 29631da177e4SLinus Torvalds 2964056834d9SIlpo Järvinen if (icsk->icsk_ack.pingpong || 2965056834d9SIlpo Järvinen (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 29661da177e4SLinus Torvalds max_ato = TCP_DELACK_MAX; 29671da177e4SLinus Torvalds 29681da177e4SLinus Torvalds /* Slow path, intersegment interval is "high". */ 29691da177e4SLinus Torvalds 29701da177e4SLinus Torvalds /* If some rtt estimate is known, use it to bound delayed ack. 2971463c84b9SArnaldo Carvalho de Melo * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 29721da177e4SLinus Torvalds * directly. 29731da177e4SLinus Torvalds */ 29741da177e4SLinus Torvalds if (tp->srtt) { 29751da177e4SLinus Torvalds int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 29761da177e4SLinus Torvalds 29771da177e4SLinus Torvalds if (rtt < max_ato) 29781da177e4SLinus Torvalds max_ato = rtt; 29791da177e4SLinus Torvalds } 29801da177e4SLinus Torvalds 29811da177e4SLinus Torvalds ato = min(ato, max_ato); 29821da177e4SLinus Torvalds } 29831da177e4SLinus Torvalds 29841da177e4SLinus Torvalds /* Stay within the limit we were given */ 29851da177e4SLinus Torvalds timeout = jiffies + ato; 29861da177e4SLinus Torvalds 29871da177e4SLinus Torvalds /* Use the new timeout only if there wasn't an older one earlier. */ 2988463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 29891da177e4SLinus Torvalds /* If the delack timer was blocked or is about to expire, 29901da177e4SLinus Torvalds * send ACK now. 29911da177e4SLinus Torvalds */ 2992463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.blocked || 2993463c84b9SArnaldo Carvalho de Melo time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 29941da177e4SLinus Torvalds tcp_send_ack(sk); 29951da177e4SLinus Torvalds return; 29961da177e4SLinus Torvalds } 29971da177e4SLinus Torvalds 2998463c84b9SArnaldo Carvalho de Melo if (!time_before(timeout, icsk->icsk_ack.timeout)) 2999463c84b9SArnaldo Carvalho de Melo timeout = icsk->icsk_ack.timeout; 30001da177e4SLinus Torvalds } 3001463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 3002463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.timeout = timeout; 3003463c84b9SArnaldo Carvalho de Melo sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 30041da177e4SLinus Torvalds } 30051da177e4SLinus Torvalds 30061da177e4SLinus Torvalds /* This routine sends an ack and also updates the window.
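 * If the skb allocation below fails, the delayed-ACK machinery is
 * re-armed so the ACK is retried later rather than silently dropped.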
*/ 30071da177e4SLinus Torvalds void tcp_send_ack(struct sock *sk) 30081da177e4SLinus Torvalds { 30091da177e4SLinus Torvalds struct sk_buff *buff; 30101da177e4SLinus Torvalds 3011058dc334SIlpo Järvinen /* If we have been reset, we may not send again. */ 3012058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE) 3013058dc334SIlpo Järvinen return; 3014058dc334SIlpo Järvinen 30151da177e4SLinus Torvalds /* We are not putting this on the write queue, so 30161da177e4SLinus Torvalds * tcp_transmit_skb() will set the ownership to this 30171da177e4SLinus Torvalds * sock. 30181da177e4SLinus Torvalds */ 301999a1dec7SMel Gorman buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 30201da177e4SLinus Torvalds if (buff == NULL) { 3021463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 3022463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 30233f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 30243f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX); 30251da177e4SLinus Torvalds return; 30261da177e4SLinus Torvalds } 30271da177e4SLinus Torvalds 30281da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 30291da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 3030a3433f35SChangli Gao tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 30311da177e4SLinus Torvalds 30321da177e4SLinus Torvalds /* Send it off, this clears delayed acks for us. */ 30331da177e4SLinus Torvalds TCP_SKB_CB(buff)->when = tcp_time_stamp; 303499a1dec7SMel Gorman tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); 30351da177e4SLinus Torvalds } 30361da177e4SLinus Torvalds 30371da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence 30381da177e4SLinus Torvalds * number. It assumes the other end will try to ack it. 30391da177e4SLinus Torvalds * 30401da177e4SLinus Torvalds * Question: what should we do in urgent mode? 30411da177e4SLinus Torvalds * 4.4BSD forces sending a single byte of data. We cannot send 30421da177e4SLinus Torvalds * out of window data, because we have SND.NXT==SND.MAX... 30431da177e4SLinus Torvalds * 30441da177e4SLinus Torvalds * Current solution: to send TWO zero-length segments in urgent mode: 30451da177e4SLinus Torvalds * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 30461da177e4SLinus Torvalds * out-of-date with SND.UNA-1 to probe window. 30471da177e4SLinus Torvalds */ 30481da177e4SLinus Torvalds static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 30491da177e4SLinus Torvalds { 30501da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30511da177e4SLinus Torvalds struct sk_buff *skb; 30521da177e4SLinus Torvalds 30531da177e4SLinus Torvalds /* We don't queue it, tcp_transmit_skb() sets ownership. */ 305499a1dec7SMel Gorman skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); 30551da177e4SLinus Torvalds if (skb == NULL) 30561da177e4SLinus Torvalds return -1; 30571da177e4SLinus Torvalds 30581da177e4SLinus Torvalds /* Reserve space for headers and set control bits. */ 30591da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 30601da177e4SLinus Torvalds /* Use a previous sequence. This should cause the other 30611da177e4SLinus Torvalds * end to send an ack. Don't queue or clone SKB, just 30621da177e4SLinus Torvalds * send it.
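 * Worked example: with snd_una = 1000 and urgent == 0, the probe below
 * carries seq 999, an already-ACKed sequence, so the peer drops the
 * segment but still answers with an ACK advertising its current window.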
30631da177e4SLinus Torvalds */ 3064a3433f35SChangli Gao tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 30651da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 3066dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 30671da177e4SLinus Torvalds } 30681da177e4SLinus Torvalds 3069ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk) 3070ee995283SPavel Emelyanov { 3071ee995283SPavel Emelyanov if (sk->sk_state == TCP_ESTABLISHED) { 3072ee995283SPavel Emelyanov tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3073c0e88ff0SPavel Emelyanov tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; 3074ee995283SPavel Emelyanov tcp_xmit_probe_skb(sk, 0); 3075ee995283SPavel Emelyanov } 3076ee995283SPavel Emelyanov } 3077ee995283SPavel Emelyanov 307867edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */ 30791da177e4SLinus Torvalds int tcp_write_wakeup(struct sock *sk) 30801da177e4SLinus Torvalds { 30811da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30821da177e4SLinus Torvalds struct sk_buff *skb; 30831da177e4SLinus Torvalds 3084058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE) 3085058dc334SIlpo Järvinen return -1; 3086058dc334SIlpo Järvinen 3087fe067e8aSDavid S. Miller if ((skb = tcp_send_head(sk)) != NULL && 308890840defSIlpo Järvinen before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 30891da177e4SLinus Torvalds int err; 30900c54b85fSIlpo Järvinen unsigned int mss = tcp_current_mss(sk); 309190840defSIlpo Järvinen unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 30921da177e4SLinus Torvalds 30931da177e4SLinus Torvalds if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 30941da177e4SLinus Torvalds tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 30951da177e4SLinus Torvalds 30961da177e4SLinus Torvalds /* We are probing the opening of a window, 30971da177e4SLinus Torvalds * but the window size is != 0; this must have 30981da177e4SLinus Torvalds * been a result of sender-side SWS avoidance. 30991da177e4SLinus Torvalds */ 31001da177e4SLinus Torvalds if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 31011da177e4SLinus Torvalds skb->len > mss) { 31021da177e4SLinus Torvalds seg_size = min(seg_size, mss); 31034de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3104846998aeSDavid S. Miller if (tcp_fragment(sk, skb, seg_size, mss)) 31051da177e4SLinus Torvalds return -1; 31061da177e4SLinus Torvalds } else if (!tcp_skb_pcount(skb)) 3107846998aeSDavid S. Miller tcp_set_skb_tso_segs(sk, skb, mss); 31081da177e4SLinus Torvalds 31094de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 31101da177e4SLinus Torvalds TCP_SKB_CB(skb)->when = tcp_time_stamp; 3111dfb4b9dcSDavid S. Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 311266f5fe62SIlpo Järvinen if (!err) 311366f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 31141da177e4SLinus Torvalds return err; 31151da177e4SLinus Torvalds } else { 311633f5f57eSIlpo Järvinen if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 31174828e7f4SIlpo Järvinen tcp_xmit_probe_skb(sk, 1); 31181da177e4SLinus Torvalds return tcp_xmit_probe_skb(sk, 0); 31191da177e4SLinus Torvalds } 31201da177e4SLinus Torvalds } 31211da177e4SLinus Torvalds 31221da177e4SLinus Torvalds /* A window probe timeout has occurred. If the window is not closed, send 31231da177e4SLinus Torvalds * a partial packet, else a zero probe.
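 * Backoff sketch: each failed probe bumps icsk_backoff, so the timer
 * below is re-armed at icsk_rto << icsk_backoff (e.g. 400ms, 800ms,
 * 1600ms, ... for icsk_rto = 200ms), capped at TCP_RTO_MAX.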
31241da177e4SLinus Torvalds */ 31251da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk) 31261da177e4SLinus Torvalds { 3127463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 31281da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 31291da177e4SLinus Torvalds int err; 31301da177e4SLinus Torvalds 31311da177e4SLinus Torvalds err = tcp_write_wakeup(sk); 31321da177e4SLinus Torvalds 3133fe067e8aSDavid S. Miller if (tp->packets_out || !tcp_send_head(sk)) { 31341da177e4SLinus Torvalds /* Cancel probe timer, if it is not required. */ 31356687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 3136463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 31371da177e4SLinus Torvalds return; 31381da177e4SLinus Torvalds } 31391da177e4SLinus Torvalds 31401da177e4SLinus Torvalds if (err <= 0) { 3141463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_backoff < sysctl_tcp_retries2) 3142463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff++; 31436687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out++; 3144463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 31453f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 31463f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 31471da177e4SLinus Torvalds } else { 31481da177e4SLinus Torvalds /* If the packet was not sent due to local congestion, 31496687e988SArnaldo Carvalho de Melo * do not back off and do not remember icsk_probes_out. 31501da177e4SLinus Torvalds * Let local senders fight for local resources. 31511da177e4SLinus Torvalds * 31521da177e4SLinus Torvalds * Still use the accumulated backoff, though. 31531da177e4SLinus Torvalds */ 31546687e988SArnaldo Carvalho de Melo if (!icsk->icsk_probes_out) 31556687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 1; 3156463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3157463c84b9SArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, 31583f421baaSArnaldo Carvalho de Melo TCP_RESOURCE_PROBE_INTERVAL), 31593f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 31601da177e4SLinus Torvalds } 31611da177e4SLinus Torvalds } 3162
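/* Illustrative sketch (hypothetical helper, not part of this file): the
 * non-wscale branch of __tcp_select_window() above rounds the offered
 * window down to a whole number of segments, e.g. free_space = 10000
 * with mss = 1448 yields 6 * 1448 = 8688.
 */
static inline u32 tcp_example_round_window(u32 free_space, u32 mss)
{
	/* largest multiple of mss that still fits in free_space */
	return mss ? (free_space / mss) * mss : free_space;
}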