/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of four TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 262144;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;
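/* Illustrative note (not part of the original file): these knobs are
 * exposed under /proc/sys/net/ipv4/.  For example, disabling the
 * RFC2861 idle-restart behavior declared just below, from a shell:
 *
 *	echo 0 > /proc/sys/net/ipv4/tcp_slow_start_after_idle
 *
 * or equivalently: sysctl -w net.ipv4.tcp_slow_start_after_idle=0
 */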
/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if window was not shrunk or the amount shrunk was less than one
 * window scaling factor due to loss of precision.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
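/* Worked example for tcp_acceptable_seq() above (illustrative, assuming
 * the peer advertised rcv_wscale = 7): suppose tcp_wnd_end(tp) = 1000 and
 * snd_nxt = 1100.  snd_nxt is past the window end, but only by
 * 100 < (1 << 7) = 128, i.e. by less than one scaling unit of precision,
 * so snd_nxt is still returned.  Had snd_nxt been 1200, the shrink would
 * exceed one scaling unit and tcp_wnd_end(tp) would be used instead.
 */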
/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}
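/* Worked example for tcp_cwnd_restart() above (illustrative): with
 * snd_cwnd = 40, restart_cwnd = 10 and icsk_rto = 200ms, an idle period
 * of delta = 900ms halves cwnd once per elapsed RTO while it remains
 * above restart_cwnd: 40 -> 20 -> 10, and max(cwnd, restart_cwnd)
 * guarantees the restart window of 10 as the floor.
 */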
/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
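/* Worked example for tcp_default_init_rwnd() above (illustrative):
 * TCP_INIT_CWND is 10, so init_rwnd starts at 20 segments.  For a
 * jumbo mss of 9000 the window is scaled down byte-wise:
 *
 *	max((1460 * 20) / 9000, 2U) = max(3, 2) = 3 segments
 *
 * keeping the initial receive window near 20 * 1460 bytes regardless
 * of mss.
 */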
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
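/* Worked example for tcp_select_initial_window() above (illustrative):
 * with 4 MB of receive space (space = 4194304), the loop shifts until
 * the window fits in the 16-bit header field:
 *
 *	4194304 >> 6 = 65536 > U16_MAX, but 4194304 >> 7 = 32768 <= U16_MAX
 *
 * so rcv_wscale = 7 is advertised and the peer multiplies every raw
 * window value we send by 2^7.
 */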
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN. */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		       tcp_ca_needs_ecn(sk);

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk))
			INET_ECN_xmit(sk);
	}
}
static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}
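/* Illustrative walk-through of tcp_ecn_send() above (not part of the
 * original file): on an ECN-OK connection, a fresh (not retransmitted)
 * data segment is sent with ECT marked on the IP header via
 * INET_ECN_xmit().  If the receiver has reported congestion,
 * TCP_ECN_QUEUE_CWR is pending, so the very next data segment carries
 * th->cwr = 1 to acknowledge the ECE; pure ACKs and retransmits instead
 * clear ECT via INET_ECN_dontxmit().
 */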
/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}
	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}
}
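/* Illustrative wire layout produced by tcp_options_write() above for an
 * established connection using only timestamps (one 32-bit word of
 * padding/kind/length followed by the two values, 12 option bytes):
 *
 *	01 01 08 0a  <tsval:4>  <tsecr:4>
 *
 * i.e. NOP, NOP, TIMESTAMP (kind 8, length 10), padded up front with
 * NOPs so the option block stays 32-bit aligned.
 */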
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen
	 * in normal data packets (should timestamps be used) must be included
	 * in the MSS advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.  So account for
	 * this fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
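/* Illustrative budget for tcp_syn_options() above: MAX_TCP_OPTION_SPACE
 * is 40 bytes.  A typical SYN consumes MSS (4) + timestamps (12, with
 * SACK_PERM folded into the same aligned block) + window scale (4)
 * = 20 bytes, leaving 20 bytes of "remaining" for a Fast Open cookie
 * option, whose own length is rounded up to a multiple of 4 first.
 */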
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options.  There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc
 * either way.
 *
 * Since transmit from the skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp))
			tcp_xmit_retransmit_queue(sk);

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}
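/* Illustrative TSQ flow (a sketch, not part of the original file; the
 * THROTTLED flag is set by the transmit path outside this excerpt):
 *
 *	tcp_transmit_skb()	skb->destructor = tcp_wfree, skb queued
 *				to qdisc/NIC; flow marked TSQF_THROTTLED
 *	tcp_wfree()		skb freed by the driver: sets TSQF_QUEUED,
 *				links tp->tsq_node onto the per-cpu list
 *	tcp_tasklet_func()	clears TSQ_QUEUED and calls
 *				tcp_tsq_handler() -> tcp_write_xmit()
 *
 * i.e. freeing a transmitted skb is what re-arms the flow to send more.
 */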
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non-NAPI drivers).
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		if (!sk->sk_lock.owned &&
		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
			bh_lock_sock(sk);
			if (!sock_owned_by_user(sk)) {
				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
				tcp_tsq_handler(sk);
			}
			bh_unlock_sock(sk);
		}

		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
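/* Illustrative use of TCP_DEFERRED_ALL (a sketch, not the exact timer
 * code): a timer firing while the socket is owned by user context cannot
 * run its handler directly, so it records the pending work, e.g.:
 *
 *	if (!sock_owned_by_user(sk))
 *		tcp_write_timer_handler(sk);
 *	else
 *		set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags);
 *
 * and tcp_release_cb() below replays the deferred work once the lock
 * owner releases the socket.
 */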
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * Called from release_sock() to perform protocol-dependent
 * actions before the socket is released.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED)
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}
/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) &&
	    this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}
/* Note: Called under hard irq.
 * We cannot call the TCP stack right away.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;
	unsigned long nval, oval;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (oval & TSQF_QUEUED)
			break;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
			break;
		/* queue this socket to tasklet queue */
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		break;
	}
	return HRTIMER_NORESTART;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * The sch_fq packet scheduler handles pacing efficiently,
 * but is not always installed/used.
 * Return true if the TCP stack should pace packets itself.
 */
static bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
{
	u64 len_ns;
	u32 rate;

	if (!tcp_needs_internal_pacing(sk))
		return;
	rate = sk->sk_pacing_rate;
	if (!rate || rate == ~0U)
		return;

	/* Should account for header sizes as sch_fq does,
	 * but let's make things simple.
	 */
	len_ns = (u64)skb->len * NSEC_PER_SEC;
	do_div(len_ns, rate);
	hrtimer_start(&tcp_sk(sk)->pacing_timer,
		      ktime_add_ns(ktime_get(), len_ns),
		      HRTIMER_MODE_ABS_PINNED);
}
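/* Worked example for tcp_internal_pacing() above (illustrative): a
 * 1500 byte skb at sk_pacing_rate = 125000000 bytes/sec (1 Gbit/s):
 *
 *	len_ns = 1500 * NSEC_PER_SEC / 125000000 = 12000 ns
 *
 * so the pacing hrtimer fires 12 us after this transmit, and
 * tcp_pace_kick() then queues the socket back to the TSQ tasklet.
 */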
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);

	skb->skb_mstamp = tp->tcp_mstamp;
	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		tcp_rate_skb_sent(sk, skb);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk_wmem_alloc.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if packet is looped back :
	 * Other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
	skb->pfmemalloc = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);

	/* Build TCP header and checksum it. */
	th = (struct tcphdr *)skb->data;
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
		th->window      = htons(tcp_select_window(sk));
		tcp_ecn_send(sk, skb, th, tcp_header_size);
	} else {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	}
#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
Miller tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 10981da177e4SLinus Torvalds 1099a44d6eacSMartin KaFai Lau if (skb->len != tcp_header_size) { 1100cf533ea5SEric Dumazet tcp_event_data_sent(tp, sk); 1101a44d6eacSMartin KaFai Lau tp->data_segs_out += tcp_skb_pcount(skb); 1102218af599SEric Dumazet tcp_internal_pacing(sk, skb); 1103a44d6eacSMartin KaFai Lau } 11041da177e4SLinus Torvalds 1105bd37a088SWei Yongjun if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 1106aa2ea058STom Herbert TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 1107aa2ea058STom Herbert tcp_skb_pcount(skb)); 11081da177e4SLinus Torvalds 11092efd055cSMarcelo Ricardo Leitner tp->segs_out += tcp_skb_pcount(skb); 1110f69ad292SEric Dumazet /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ 1111cd7d8498SEric Dumazet skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); 1112f69ad292SEric Dumazet skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1113cd7d8498SEric Dumazet 11147faee5c0SEric Dumazet /* Our usage of tstamp should remain private */ 11152456e855SThomas Gleixner skb->tstamp = 0; 1116971f10ecSEric Dumazet 1117971f10ecSEric Dumazet /* Cleanup our debris for IP stacks */ 1118971f10ecSEric Dumazet memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), 1119971f10ecSEric Dumazet sizeof(struct inet6_skb_parm))); 1120971f10ecSEric Dumazet 1121b0270e91SEric Dumazet err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); 11227faee5c0SEric Dumazet 112383de47cdSHua Zhong if (likely(err <= 0)) 11241da177e4SLinus Torvalds return err; 11251da177e4SLinus Torvalds 11265ee2c941SChristoph Paasch tcp_enter_cwr(sk); 11271da177e4SLinus Torvalds 1128b9df3cb8SGerrit Renker return net_xmit_eval(err); 11291da177e4SLinus Torvalds } 11301da177e4SLinus Torvalds 113167edfef7SAndi Kleen /* This routine just queues the buffer for sending. 11321da177e4SLinus Torvalds * 11331da177e4SLinus Torvalds * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 11341da177e4SLinus Torvalds * otherwise socket can stall. 11351da177e4SLinus Torvalds */ 11361da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 11371da177e4SLinus Torvalds { 11381da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 11391da177e4SLinus Torvalds 11401da177e4SLinus Torvalds /* Advance write_seq and place onto the write_queue. */ 11411da177e4SLinus Torvalds tp->write_seq = TCP_SKB_CB(skb)->end_seq; 1142f4a775d1SEric Dumazet __skb_header_release(skb); 1143fe067e8aSDavid S. Miller tcp_add_write_queue_tail(sk, skb); 11443ab224beSHideo Aoki sk->sk_wmem_queued += skb->truesize; 11453ab224beSHideo Aoki sk_mem_charge(sk, skb->truesize); 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 114867edfef7SAndi Kleen /* Initialize TSO segments for a packet. */ 11495bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1150f6302d1dSDavid S. Miller { 11518f26fb1cSEric Dumazet if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) { 1152f6302d1dSDavid S. Miller /* Avoid the costly divide in the normal 1153f6302d1dSDavid S. Miller * non-TSO case. 1154f6302d1dSDavid S. Miller */ 1155cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 1); 1156f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = 0; 1157f6302d1dSDavid S. 
Miller } else { 1158cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); 1159f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = mss_now; 11601da177e4SLinus Torvalds } 11611da177e4SLinus Torvalds } 11621da177e4SLinus Torvalds 116391fed7a1SIlpo Järvinen /* When a modification to fackets out becomes necessary, we need to check 116468f8353bSIlpo Järvinen * skb is counted to fackets_out or not. 116591fed7a1SIlpo Järvinen */ 1166cf533ea5SEric Dumazet static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, 116791fed7a1SIlpo Järvinen int decr) 116891fed7a1SIlpo Järvinen { 1169a47e5a98SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1170a47e5a98SIlpo Järvinen 1171dc86967bSIlpo Järvinen if (!tp->sacked_out || tcp_is_reno(tp)) 117291fed7a1SIlpo Järvinen return; 117391fed7a1SIlpo Järvinen 11746859d494SIlpo Järvinen if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 117591fed7a1SIlpo Järvinen tp->fackets_out -= decr; 117691fed7a1SIlpo Järvinen } 117791fed7a1SIlpo Järvinen 1178797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various 1179797108d1SIlpo Järvinen * tweaks to fix counters 1180797108d1SIlpo Järvinen */ 1181cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 1182797108d1SIlpo Järvinen { 1183797108d1SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1184797108d1SIlpo Järvinen 1185797108d1SIlpo Järvinen tp->packets_out -= decr; 1186797108d1SIlpo Järvinen 1187797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1188797108d1SIlpo Järvinen tp->sacked_out -= decr; 1189797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1190797108d1SIlpo Järvinen tp->retrans_out -= decr; 1191797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 1192797108d1SIlpo Järvinen tp->lost_out -= decr; 1193797108d1SIlpo Järvinen 1194797108d1SIlpo Järvinen /* Reno case is special. Sigh... 
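/*
 * Illustrative sketch, not part of the original file: how
 * tcp_set_skb_tso_segs() above derives the segment count for a TSO skb.
 * Hypothetical sizes; DIV_ROUND_UP matches the kernel macro of that name.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 4000, mss_now = 1448;

	/* 4000 bytes -> 1448 + 1448 + 1104 -> pcount of 3 */
	printf("pcount = %u\n", DIV_ROUND_UP(len, mss_now));
	return 0;
}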
*/ 1195797108d1SIlpo Järvinen if (tcp_is_reno(tp) && decr > 0) 1196797108d1SIlpo Järvinen tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 1197797108d1SIlpo Järvinen 1198797108d1SIlpo Järvinen tcp_adjust_fackets_out(sk, skb, decr); 1199797108d1SIlpo Järvinen 1200797108d1SIlpo Järvinen if (tp->lost_skb_hint && 1201797108d1SIlpo Järvinen before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 120252cf3cc8SIlpo Järvinen (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) 1203797108d1SIlpo Järvinen tp->lost_cnt_hint -= decr; 1204797108d1SIlpo Järvinen 1205797108d1SIlpo Järvinen tcp_verify_left_out(tp); 1206797108d1SIlpo Järvinen } 1207797108d1SIlpo Järvinen 12080a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb) 12090a2cf20cSSoheil Hassas Yeganeh { 12100a2cf20cSSoheil Hassas Yeganeh return TCP_SKB_CB(skb)->txstamp_ack || 12110a2cf20cSSoheil Hassas Yeganeh (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); 12120a2cf20cSSoheil Hassas Yeganeh } 12130a2cf20cSSoheil Hassas Yeganeh 1214490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) 1215490cc7d0SWillem de Bruijn { 1216490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo = skb_shinfo(skb); 1217490cc7d0SWillem de Bruijn 12180a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(skb)) && 1219490cc7d0SWillem de Bruijn !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { 1220490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo2 = skb_shinfo(skb2); 1221490cc7d0SWillem de Bruijn u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; 1222490cc7d0SWillem de Bruijn 1223490cc7d0SWillem de Bruijn shinfo->tx_flags &= ~tsflags; 1224490cc7d0SWillem de Bruijn shinfo2->tx_flags |= tsflags; 1225490cc7d0SWillem de Bruijn swap(shinfo->tskey, shinfo2->tskey); 1226b51e13faSMartin KaFai Lau TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; 1227b51e13faSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack = 0; 1228490cc7d0SWillem de Bruijn } 1229490cc7d0SWillem de Bruijn } 1230490cc7d0SWillem de Bruijn 1231a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) 1232a166140eSMartin KaFai Lau { 1233a166140eSMartin KaFai Lau TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; 1234a166140eSMartin KaFai Lau TCP_SKB_CB(skb)->eor = 0; 1235a166140eSMartin KaFai Lau } 1236a166140eSMartin KaFai Lau 12371da177e4SLinus Torvalds /* Function to create two new TCP segments. Shrinks the given segment 12381da177e4SLinus Torvalds * to the specified size and appends a new segment with the rest of the 12391da177e4SLinus Torvalds * packet to the list. This won't be called frequently, I hope. 12401da177e4SLinus Torvalds * Remember, these are still headerless SKBs at this point. 12411da177e4SLinus Torvalds */ 1242056834d9SIlpo Järvinen int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 12436cc55e09SOctavian Purdila unsigned int mss_now, gfp_t gfp) 12441da177e4SLinus Torvalds { 12451da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 12461da177e4SLinus Torvalds struct sk_buff *buff; 12476475be16SDavid S. 
Miller int nsize, old_factor; 1248b60b49eaSHerbert Xu int nlen; 12499ce01461SIlpo Järvinen u8 flags; 12501da177e4SLinus Torvalds 12512fceec13SIlpo Järvinen if (WARN_ON(len > skb->len)) 12522fceec13SIlpo Järvinen return -EINVAL; 12536a438bbeSStephen Hemminger 12541da177e4SLinus Torvalds nsize = skb_headlen(skb) - len; 12551da177e4SLinus Torvalds if (nsize < 0) 12561da177e4SLinus Torvalds nsize = 0; 12571da177e4SLinus Torvalds 12586cc55e09SOctavian Purdila if (skb_unclone(skb, gfp)) 12591da177e4SLinus Torvalds return -ENOMEM; 12601da177e4SLinus Torvalds 12611da177e4SLinus Torvalds /* Get a new skb... force flag on. */ 1262eb934478SEric Dumazet buff = sk_stream_alloc_skb(sk, nsize, gfp, true); 126351456b29SIan Morris if (!buff) 12641da177e4SLinus Torvalds return -ENOMEM; /* We'll just try again later. */ 1265ef5cb973SHerbert Xu 12663ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 12673ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1268b60b49eaSHerbert Xu nlen = skb->len - len - nsize; 1269b60b49eaSHerbert Xu buff->truesize += nlen; 1270b60b49eaSHerbert Xu skb->truesize -= nlen; 12711da177e4SLinus Torvalds 12721da177e4SLinus Torvalds /* Correct the sequence numbers. */ 12731da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 12741da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 12751da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 12761da177e4SLinus Torvalds 12771da177e4SLinus Torvalds /* PSH and FIN should only be set in the second packet. */ 12784de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 12794de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 12804de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1281e14c3cafSHerbert Xu TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1282a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 12831da177e4SLinus Torvalds 128484fa7933SPatrick McHardy if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 12851da177e4SLinus Torvalds /* Copy and checksum data tail into the new buffer. */ 1286056834d9SIlpo Järvinen buff->csum = csum_partial_copy_nocheck(skb->data + len, 1287056834d9SIlpo Järvinen skb_put(buff, nsize), 12881da177e4SLinus Torvalds nsize, 0); 12891da177e4SLinus Torvalds 12901da177e4SLinus Torvalds skb_trim(skb, len); 12911da177e4SLinus Torvalds 12921da177e4SLinus Torvalds skb->csum = csum_block_sub(skb->csum, buff->csum, len); 12931da177e4SLinus Torvalds } else { 129484fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 12951da177e4SLinus Torvalds skb_split(skb, buff, len); 12961da177e4SLinus Torvalds } 12971da177e4SLinus Torvalds 12981da177e4SLinus Torvalds buff->ip_summed = skb->ip_summed; 12991da177e4SLinus Torvalds 1300a61bbcf2SPatrick McHardy buff->tstamp = skb->tstamp; 1301490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 13021da177e4SLinus Torvalds 13036475be16SDavid S. Miller old_factor = tcp_skb_pcount(skb); 13046475be16SDavid S. Miller 13051da177e4SLinus Torvalds /* Fix up tso_factor for both original and new SKB. */ 13065bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 13075bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 13081da177e4SLinus Torvalds 1309b9f64820SYuchung Cheng /* Update delivered info for the new segment */ 1310b9f64820SYuchung Cheng TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; 1311b9f64820SYuchung Cheng 13126475be16SDavid S. Miller /* If this packet has been sent out already, we must 13136475be16SDavid S. 
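/*
 * Illustrative sketch, not part of the original file: the sequence
 * bookkeeping tcp_fragment() performs when splitting an skb at 'len'
 * bytes. Hypothetical sequence numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned int seq = 1000, end_seq = 4000, len = 1200;
	unsigned int buff_seq = seq + len;	/* new segment starts at the cut */
	unsigned int buff_end_seq = end_seq;	/* tail keeps the old end */
	unsigned int skb_end_seq = buff_seq;	/* head now ends at the cut */

	printf("skb [%u,%u) buff [%u,%u)\n",
	       seq, skb_end_seq, buff_seq, buff_end_seq);
	return 0;
}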
Miller * adjust the various packet counters. 13146475be16SDavid S. Miller */ 1315cf0b450cSHerbert Xu if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 13166475be16SDavid S. Miller int diff = old_factor - tcp_skb_pcount(skb) - 13176475be16SDavid S. Miller tcp_skb_pcount(buff); 13181da177e4SLinus Torvalds 1319797108d1SIlpo Järvinen if (diff) 1320797108d1SIlpo Järvinen tcp_adjust_pcount(sk, skb, diff); 13211da177e4SLinus Torvalds } 13221da177e4SLinus Torvalds 13231da177e4SLinus Torvalds /* Link BUFF into the send queue. */ 1324f4a775d1SEric Dumazet __skb_header_release(buff); 1325fe067e8aSDavid S. Miller tcp_insert_write_queue_after(skb, buff, sk); 13261da177e4SLinus Torvalds 13271da177e4SLinus Torvalds return 0; 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 1330f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled 1331f4d01666SEric Dumazet * data is not copied, but immediately discarded. 13321da177e4SLinus Torvalds */ 13337162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len) 13341da177e4SLinus Torvalds { 13357b7fc97aSEric Dumazet struct skb_shared_info *shinfo; 13361da177e4SLinus Torvalds int i, k, eat; 13371da177e4SLinus Torvalds 13384fa48bf3SEric Dumazet eat = min_t(int, len, skb_headlen(skb)); 13394fa48bf3SEric Dumazet if (eat) { 13404fa48bf3SEric Dumazet __skb_pull(skb, eat); 13414fa48bf3SEric Dumazet len -= eat; 13424fa48bf3SEric Dumazet if (!len) 13437162fb24SEric Dumazet return 0; 13444fa48bf3SEric Dumazet } 13451da177e4SLinus Torvalds eat = len; 13461da177e4SLinus Torvalds k = 0; 13477b7fc97aSEric Dumazet shinfo = skb_shinfo(skb); 13487b7fc97aSEric Dumazet for (i = 0; i < shinfo->nr_frags; i++) { 13497b7fc97aSEric Dumazet int size = skb_frag_size(&shinfo->frags[i]); 13509e903e08SEric Dumazet 13519e903e08SEric Dumazet if (size <= eat) { 1352aff65da0SIan Campbell skb_frag_unref(skb, i); 13539e903e08SEric Dumazet eat -= size; 13541da177e4SLinus Torvalds } else { 13557b7fc97aSEric Dumazet shinfo->frags[k] = shinfo->frags[i]; 13561da177e4SLinus Torvalds if (eat) { 13577b7fc97aSEric Dumazet shinfo->frags[k].page_offset += eat; 13587b7fc97aSEric Dumazet skb_frag_size_sub(&shinfo->frags[k], eat); 13591da177e4SLinus Torvalds eat = 0; 13601da177e4SLinus Torvalds } 13611da177e4SLinus Torvalds k++; 13621da177e4SLinus Torvalds } 13631da177e4SLinus Torvalds } 13647b7fc97aSEric Dumazet shinfo->nr_frags = k; 13651da177e4SLinus Torvalds 13661da177e4SLinus Torvalds skb->data_len -= len; 13671da177e4SLinus Torvalds skb->len = skb->data_len; 13687162fb24SEric Dumazet return len; 13691da177e4SLinus Torvalds } 13701da177e4SLinus Torvalds 137167edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. 
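/*
 * Illustrative sketch, not part of the original file: the frag walk of
 * __pskb_trim_head() above, replayed on plain arrays. Fully consumed
 * frags are dropped, the first survivor is trimmed from the front (the
 * kernel does this by advancing page_offset), and the rest are compacted.
 */
#include <stdio.h>

int main(void)
{
	int frags[3] = { 700, 800, 900 }, nr_frags = 3, eat = 1000;
	int i, k = 0;

	for (i = 0; i < nr_frags; i++) {
		if (frags[i] <= eat) {
			eat -= frags[i];		/* drop whole frag */
		} else {
			frags[k] = frags[i];
			if (eat) {
				frags[k] -= eat;	/* partial front trim */
				eat = 0;
			}
			k++;
		}
	}
	printf("%d frags left, first = %d\n", k, frags[0]);	/* 2, 500 */
	return 0;
}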
*/ 13721da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 13731da177e4SLinus Torvalds { 13747162fb24SEric Dumazet u32 delta_truesize; 13757162fb24SEric Dumazet 137614bbd6a5SPravin B Shelar if (skb_unclone(skb, GFP_ATOMIC)) 13771da177e4SLinus Torvalds return -ENOMEM; 13781da177e4SLinus Torvalds 13797162fb24SEric Dumazet delta_truesize = __pskb_trim_head(skb, len); 13801da177e4SLinus Torvalds 13811da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq += len; 138284fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 13831da177e4SLinus Torvalds 13847162fb24SEric Dumazet if (delta_truesize) { 13857162fb24SEric Dumazet skb->truesize -= delta_truesize; 13867162fb24SEric Dumazet sk->sk_wmem_queued -= delta_truesize; 13877162fb24SEric Dumazet sk_mem_uncharge(sk, delta_truesize); 13881da177e4SLinus Torvalds sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 13897162fb24SEric Dumazet } 13901da177e4SLinus Torvalds 13915b35e1e6SNeal Cardwell /* Any change of skb->len requires recalculation of tso factor. */ 13921da177e4SLinus Torvalds if (tcp_skb_pcount(skb) > 1) 13935bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); 13941da177e4SLinus Torvalds 13951da177e4SLinus Torvalds return 0; 13961da177e4SLinus Torvalds } 13971da177e4SLinus Torvalds 13981b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options. */ 13991b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 14005d424d5aSJohn Heffner { 1401cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1402cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 14035d424d5aSJohn Heffner int mss_now; 14045d424d5aSJohn Heffner 14055d424d5aSJohn Heffner /* Calculate base mss without TCP options: 14065d424d5aSJohn Heffner It is MMS_S - sizeof(tcphdr) of rfc1122 14075d424d5aSJohn Heffner */ 14085d424d5aSJohn Heffner mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 14095d424d5aSJohn Heffner 141067469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 141167469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 141267469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 141367469601SEric Dumazet 141467469601SEric Dumazet if (dst && dst_allfrag(dst)) 141567469601SEric Dumazet mss_now -= icsk->icsk_af_ops->net_frag_header_len; 141667469601SEric Dumazet } 141767469601SEric Dumazet 14185d424d5aSJohn Heffner /* Clamp it (mss_clamp does not include tcp options) */ 14195d424d5aSJohn Heffner if (mss_now > tp->rx_opt.mss_clamp) 14205d424d5aSJohn Heffner mss_now = tp->rx_opt.mss_clamp; 14215d424d5aSJohn Heffner 14225d424d5aSJohn Heffner /* Now subtract optional transport overhead */ 14235d424d5aSJohn Heffner mss_now -= icsk->icsk_ext_hdr_len; 14245d424d5aSJohn Heffner 14255d424d5aSJohn Heffner /* Then reserve room for full set of TCP options and 8 bytes of data */ 14265d424d5aSJohn Heffner if (mss_now < 48) 14275d424d5aSJohn Heffner mss_now = 48; 14285d424d5aSJohn Heffner return mss_now; 14295d424d5aSJohn Heffner } 14305d424d5aSJohn Heffner 14311b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here. 
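/*
 * Illustrative sketch, not part of the original file: the arithmetic of
 * __tcp_mtu_to_mss() above for a plain IPv4 path, assuming a 1500-byte
 * PMTU, no extension headers and the default 65535 clamp.
 */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500, net_header_len = 20, ext_hdr_len = 0;
	int mss_clamp = 65535;
	int mss_now = pmtu - net_header_len - 20;	/* minus sizeof(tcphdr) */

	if (mss_now > mss_clamp)
		mss_now = mss_clamp;
	mss_now -= ext_hdr_len;
	if (mss_now < 48)
		mss_now = 48;
	printf("mss = %d\n", mss_now);			/* 1460 */
	return 0;
}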
*/
14321b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14331b63edd6SYuchung Cheng {
14341b63edd6SYuchung Cheng /* Subtract TCP options size, not including SACKs */
14351b63edd6SYuchung Cheng return __tcp_mtu_to_mss(sk, pmtu) -
14361b63edd6SYuchung Cheng (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14371b63edd6SYuchung Cheng }
14381b63edd6SYuchung Cheng
14395d424d5aSJohn Heffner /* Inverse of above */
144067469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
14415d424d5aSJohn Heffner {
1442cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
1443cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk);
14445d424d5aSJohn Heffner int mtu;
14455d424d5aSJohn Heffner
14465d424d5aSJohn Heffner mtu = mss +
14475d424d5aSJohn Heffner tp->tcp_header_len +
14485d424d5aSJohn Heffner icsk->icsk_ext_hdr_len +
14495d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len;
14505d424d5aSJohn Heffner
145167469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
145267469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) {
145367469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk);
145467469601SEric Dumazet
145567469601SEric Dumazet if (dst && dst_allfrag(dst))
145667469601SEric Dumazet mtu += icsk->icsk_af_ops->net_frag_header_len;
145767469601SEric Dumazet }
14585d424d5aSJohn Heffner return mtu;
14595d424d5aSJohn Heffner }
1460556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
14615d424d5aSJohn Heffner
146267edfef7SAndi Kleen /* MTU probing init per socket */
14635d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
14645d424d5aSJohn Heffner {
14655d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk);
14665d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
1467b0f9ca53SFan Du struct net *net = sock_net(sk);
14685d424d5aSJohn Heffner
1469b0f9ca53SFan Du icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
14705d424d5aSJohn Heffner icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
14715d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len;
1472b0f9ca53SFan Du icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
14735d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0;
147405cbc0dbSFan Du if (icsk->icsk_mtup.enabled)
1475c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
14765d424d5aSJohn Heffner }
14774bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
14785d424d5aSJohn Heffner
14791da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
14801da177e4SLinus Torvalds
14811da177e4SLinus Torvalds tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
14821da177e4SLinus Torvalds for TCP options, but includes only the bare TCP header.
14831da177e4SLinus Torvalds
14841da177e4SLinus Torvalds tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1485caa20d9aSStephen Hemminger It is the minimum of user_mss and the mss received with SYN.
14861da177e4SLinus Torvalds It also does not include TCP options.
14871da177e4SLinus Torvalds
1488d83d8461SArnaldo Carvalho de Melo inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
14891da177e4SLinus Torvalds
14901da177e4SLinus Torvalds tp->mss_cache is current effective sending mss, including
14911da177e4SLinus Torvalds all tcp options except for SACKs.
It is evaluated, 14921da177e4SLinus Torvalds taking into account current pmtu, but never exceeds 14931da177e4SLinus Torvalds tp->rx_opt.mss_clamp. 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds NOTE1. rfc1122 clearly states that advertised MSS 14961da177e4SLinus Torvalds DOES NOT include either tcp or ip options. 14971da177e4SLinus Torvalds 1498d83d8461SArnaldo Carvalho de Melo NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1499d83d8461SArnaldo Carvalho de Melo are READ ONLY outside this function. --ANK (980731) 15001da177e4SLinus Torvalds */ 15011da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 15021da177e4SLinus Torvalds { 15031da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1504d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 15055d424d5aSJohn Heffner int mss_now; 15061da177e4SLinus Torvalds 15075d424d5aSJohn Heffner if (icsk->icsk_mtup.search_high > pmtu) 15085d424d5aSJohn Heffner icsk->icsk_mtup.search_high = pmtu; 15091da177e4SLinus Torvalds 15105d424d5aSJohn Heffner mss_now = tcp_mtu_to_mss(sk, pmtu); 1511409d22b4SIlpo Järvinen mss_now = tcp_bound_to_half_wnd(tp, mss_now); 15121da177e4SLinus Torvalds 15131da177e4SLinus Torvalds /* And store cached results */ 1514d83d8461SArnaldo Carvalho de Melo icsk->icsk_pmtu_cookie = pmtu; 15155d424d5aSJohn Heffner if (icsk->icsk_mtup.enabled) 15165d424d5aSJohn Heffner mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1517c1b4a7e6SDavid S. Miller tp->mss_cache = mss_now; 15181da177e4SLinus Torvalds 15191da177e4SLinus Torvalds return mss_now; 15201da177e4SLinus Torvalds } 15214bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss); 15221da177e4SLinus Torvalds 15231da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options, 15241da177e4SLinus Torvalds * and even PMTU discovery events into account. 15251da177e4SLinus Torvalds */ 15260c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk) 15271da177e4SLinus Torvalds { 1528cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1529cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 1530c1b4a7e6SDavid S. Miller u32 mss_now; 153195c96174SEric Dumazet unsigned int header_len; 153233ad798cSAdam Langley struct tcp_out_options opts; 153333ad798cSAdam Langley struct tcp_md5sig_key *md5; 15341da177e4SLinus Torvalds 1535c1b4a7e6SDavid S. Miller mss_now = tp->mss_cache; 1536c1b4a7e6SDavid S. Miller 15371da177e4SLinus Torvalds if (dst) { 15381da177e4SLinus Torvalds u32 mtu = dst_mtu(dst); 1539d83d8461SArnaldo Carvalho de Melo if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 15401da177e4SLinus Torvalds mss_now = tcp_sync_mss(sk, mtu); 15411da177e4SLinus Torvalds } 15421da177e4SLinus Torvalds 154333ad798cSAdam Langley header_len = tcp_established_options(sk, NULL, &opts, &md5) + 154433ad798cSAdam Langley sizeof(struct tcphdr); 154533ad798cSAdam Langley /* The mss_cache is sized based on tp->tcp_header_len, which assumes 154633ad798cSAdam Langley * some common options. 
If this is an odd packet (because we have SACK
154733ad798cSAdam Langley * blocks etc) then our calculated header_len will be different, and
154833ad798cSAdam Langley * we have to adjust mss_now correspondingly */
154933ad798cSAdam Langley if (header_len != tp->tcp_header_len) {
155033ad798cSAdam Langley int delta = (int) header_len - tp->tcp_header_len;
155133ad798cSAdam Langley mss_now -= delta;
155233ad798cSAdam Langley }
1553cfb6eeb4SYOSHIFUJI Hideaki
15541da177e4SLinus Torvalds return mss_now;
15551da177e4SLinus Torvalds }
15561da177e4SLinus Torvalds
155786fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
155886fd14adSWeiping Pan * As additional protections, we do not touch cwnd in retransmission phases,
155986fd14adSWeiping Pan * or if the application has hit its sndbuf limit recently.
156086fd14adSWeiping Pan */
156186fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1562a762a980SDavid S. Miller {
15639e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk);
1564a762a980SDavid S. Miller
156586fd14adSWeiping Pan if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
156686fd14adSWeiping Pan sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
156786fd14adSWeiping Pan /* Limited by application or receiver window. */
156886fd14adSWeiping Pan u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
156986fd14adSWeiping Pan u32 win_used = max(tp->snd_cwnd_used, init_win);
157086fd14adSWeiping Pan if (win_used < tp->snd_cwnd) {
157186fd14adSWeiping Pan tp->snd_ssthresh = tcp_current_ssthresh(sk);
157286fd14adSWeiping Pan tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
157386fd14adSWeiping Pan }
157486fd14adSWeiping Pan tp->snd_cwnd_used = 0;
157586fd14adSWeiping Pan }
1576c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32;
157786fd14adSWeiping Pan }
157886fd14adSWeiping Pan
1579ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1580a762a980SDavid S. Miller {
15811b1fc3fdSWei Wang const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1582a762a980SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk);
1583a762a980SDavid S. Miller
1584ca8a2263SNeal Cardwell /* Track the maximum number of outstanding packets in each
1585ca8a2263SNeal Cardwell * window, and remember whether we were cwnd-limited then.
1586ca8a2263SNeal Cardwell */
1587ca8a2263SNeal Cardwell if (!before(tp->snd_una, tp->max_packets_seq) ||
1588ca8a2263SNeal Cardwell tp->packets_out > tp->max_packets_out) {
1589ca8a2263SNeal Cardwell tp->max_packets_out = tp->packets_out;
1590ca8a2263SNeal Cardwell tp->max_packets_seq = tp->snd_nxt;
1591ca8a2263SNeal Cardwell tp->is_cwnd_limited = is_cwnd_limited;
1592ca8a2263SNeal Cardwell }
1593e114a710SEric Dumazet
159424901551SEric Dumazet if (tcp_is_cwnd_limited(sk)) {
1595a762a980SDavid S. Miller /* Network is fed fully. */
1596a762a980SDavid S. Miller tp->snd_cwnd_used = 0;
1597c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32;
1598a762a980SDavid S. Miller } else {
1599a762a980SDavid S. Miller /* Network starves. */
1600a762a980SDavid S. Miller if (tp->packets_out > tp->snd_cwnd_used)
1601a762a980SDavid S. Miller tp->snd_cwnd_used = tp->packets_out;
1602a762a980SDavid S. Miller
160315d33c07SDavid S. Miller if (sysctl_tcp_slow_start_after_idle &&
1604c2203cf7SEric Dumazet (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16051b1fc3fdSWei Wang !ca_ops->cong_control)
1606a762a980SDavid S. Miller tcp_cwnd_application_limited(sk);
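/*
 * Illustrative sketch, not part of the original file: the RFC2861-style
 * decay performed by tcp_cwnd_application_limited() above. When the
 * sender was application-limited, cwnd is pulled halfway back toward the
 * window that was actually used. Hypothetical values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 40, snd_cwnd_used = 12, init_win = 10;
	unsigned int win_used = snd_cwnd_used > init_win ?
				snd_cwnd_used : init_win;

	if (win_used < snd_cwnd)
		snd_cwnd = (snd_cwnd + win_used) >> 1;	/* (40 + 12) / 2 = 26 */
	printf("cwnd = %u\n", snd_cwnd);
	return 0;
}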
1607b0f71bd3SFrancis Yan
1608b0f71bd3SFrancis Yan /* The following conditions together indicate the starvation
1609b0f71bd3SFrancis Yan * is caused by insufficient sender buffer:
1610b0f71bd3SFrancis Yan * 1) just sent some data (see tcp_write_xmit)
1611b0f71bd3SFrancis Yan * 2) not cwnd limited (this else condition)
1612b0f71bd3SFrancis Yan * 3) no more data to send (null tcp_send_head )
1613b0f71bd3SFrancis Yan * 4) application is hitting buffer limit (SOCK_NOSPACE)
1614b0f71bd3SFrancis Yan */
1615b0f71bd3SFrancis Yan if (!tcp_send_head(sk) && sk->sk_socket &&
1616b0f71bd3SFrancis Yan test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1617b0f71bd3SFrancis Yan (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1618b0f71bd3SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1619a762a980SDavid S. Miller }
1620a762a980SDavid S. Miller }
1621a762a980SDavid S. Miller
1622d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1623d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1624d4589926SEric Dumazet {
1625d4589926SEric Dumazet return after(tp->snd_sml, tp->snd_una) &&
1626d4589926SEric Dumazet !after(tp->snd_sml, tp->snd_nxt);
1627d4589926SEric Dumazet }
1628d4589926SEric Dumazet
1629d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1630d4589926SEric Dumazet * Note that a TSO packet might end with a sub-mss segment
1631d4589926SEric Dumazet * The test is really :
1632d4589926SEric Dumazet * if ((skb->len % mss) != 0)
1633d4589926SEric Dumazet * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1634d4589926SEric Dumazet * But we can avoid doing the divide again given we already have
1635d4589926SEric Dumazet * skb_pcount = skb->len / mss_now
16360e3a4803SIlpo Järvinen */
1637d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1638d4589926SEric Dumazet const struct sk_buff *skb)
1639d4589926SEric Dumazet {
1640d4589926SEric Dumazet if (skb->len < tcp_skb_pcount(skb) * mss_now)
1641d4589926SEric Dumazet tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1642d4589926SEric Dumazet }
1643d4589926SEric Dumazet
1644d4589926SEric Dumazet /* Return false, if packet can be sent now without violating Nagle's rules:
1645d4589926SEric Dumazet * 1. It is full sized. (provided by caller in %partial bool)
1646d4589926SEric Dumazet * 2. Or it contains FIN. (already checked by caller)
1647d4589926SEric Dumazet * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1648d4589926SEric Dumazet * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1649d4589926SEric Dumazet * With Minshall's modification: all sent small packets are ACKed.
1650d4589926SEric Dumazet */ 1651d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, 1652cc93fc51SPeter Pan(潘卫平) int nonagle) 1653d4589926SEric Dumazet { 1654d4589926SEric Dumazet return partial && 1655d4589926SEric Dumazet ((nonagle & TCP_NAGLE_CORK) || 1656d4589926SEric Dumazet (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1657d4589926SEric Dumazet } 1658605ad7f1SEric Dumazet 1659605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet, 1660605ad7f1SEric Dumazet * to send one TSO packet per ms 1661605ad7f1SEric Dumazet */ 16621b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, 16631b3878caSNeal Cardwell int min_tso_segs) 1664605ad7f1SEric Dumazet { 1665605ad7f1SEric Dumazet u32 bytes, segs; 1666605ad7f1SEric Dumazet 1667605ad7f1SEric Dumazet bytes = min(sk->sk_pacing_rate >> 10, 1668605ad7f1SEric Dumazet sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); 1669605ad7f1SEric Dumazet 1670605ad7f1SEric Dumazet /* Goal is to send at least one packet per ms, 1671605ad7f1SEric Dumazet * not one big TSO packet every 100 ms. 1672605ad7f1SEric Dumazet * This preserves ACK clocking and is consistent 1673605ad7f1SEric Dumazet * with tcp_tso_should_defer() heuristic. 1674605ad7f1SEric Dumazet */ 16751b3878caSNeal Cardwell segs = max_t(u32, bytes / mss_now, min_tso_segs); 1676605ad7f1SEric Dumazet 1677605ad7f1SEric Dumazet return min_t(u32, segs, sk->sk_gso_max_segs); 1678605ad7f1SEric Dumazet } 16791b3878caSNeal Cardwell EXPORT_SYMBOL(tcp_tso_autosize); 1680605ad7f1SEric Dumazet 1681ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting. 1682ed6e7268SNeal Cardwell * See if congestion control module wants to decide; otherwise, autosize. 1683ed6e7268SNeal Cardwell */ 1684ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) 1685ed6e7268SNeal Cardwell { 1686ed6e7268SNeal Cardwell const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 1687ed6e7268SNeal Cardwell u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0; 1688ed6e7268SNeal Cardwell 16891b3878caSNeal Cardwell return tso_segs ? : 16901b3878caSNeal Cardwell tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs); 1691ed6e7268SNeal Cardwell } 1692ed6e7268SNeal Cardwell 1693d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */ 1694d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, 1695d4589926SEric Dumazet const struct sk_buff *skb, 1696d4589926SEric Dumazet unsigned int mss_now, 1697d4589926SEric Dumazet unsigned int max_segs, 1698d4589926SEric Dumazet int nonagle) 1699c1b4a7e6SDavid S. Miller { 1700cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1701d4589926SEric Dumazet u32 partial, needed, window, max_len; 1702c1b4a7e6SDavid S. 
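/*
 * Illustrative sketch, not part of the original file: the pacing-based
 * sizing in tcp_tso_autosize() above. Assuming a 1 Gbit/s pacing rate
 * (125,000,000 bytes/sec), the >> 10 extracts roughly 1 ms worth of
 * bytes, which is then expressed in segments. The real code also caps
 * the byte budget by sk_gso_max_size, omitted here for brevity.
 */
#include <stdio.h>

int main(void)
{
	unsigned long sk_pacing_rate = 125000000UL;	/* bytes per second */
	unsigned int mss_now = 1448, min_tso_segs = 2;
	unsigned int sk_gso_max_segs = 65535;
	unsigned long bytes = sk_pacing_rate >> 10;	/* ~122 KB, ~1 ms */
	unsigned int segs = bytes / mss_now;		/* 84 segments */

	if (segs < min_tso_segs)
		segs = min_tso_segs;
	if (segs > sk_gso_max_segs)
		segs = sk_gso_max_segs;
	printf("tso goal = %u segs\n", segs);
	return 0;
}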
Miller 170390840defSIlpo Järvinen window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 17041485348dSBen Hutchings max_len = mss_now * max_segs; 17050e3a4803SIlpo Järvinen 17061485348dSBen Hutchings if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 17071485348dSBen Hutchings return max_len; 17080e3a4803SIlpo Järvinen 17095ea3a748SIlpo Järvinen needed = min(skb->len, window); 17105ea3a748SIlpo Järvinen 17111485348dSBen Hutchings if (max_len <= needed) 17121485348dSBen Hutchings return max_len; 17130e3a4803SIlpo Järvinen 1714d4589926SEric Dumazet partial = needed % mss_now; 1715d4589926SEric Dumazet /* If last segment is not a full MSS, check if Nagle rules allow us 1716d4589926SEric Dumazet * to include this last segment in this skb. 1717d4589926SEric Dumazet * Otherwise, we'll split the skb at last MSS boundary 1718d4589926SEric Dumazet */ 1719cc93fc51SPeter Pan(潘卫平) if (tcp_nagle_check(partial != 0, tp, nonagle)) 1720d4589926SEric Dumazet return needed - partial; 1721d4589926SEric Dumazet 1722d4589926SEric Dumazet return needed; 1723c1b4a7e6SDavid S. Miller } 1724c1b4a7e6SDavid S. Miller 1725c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the 1726c1b4a7e6SDavid S. Miller * congestion window rules? If so, return how many segments are allowed. 1727c1b4a7e6SDavid S. Miller */ 1728cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1729cf533ea5SEric Dumazet const struct sk_buff *skb) 1730c1b4a7e6SDavid S. Miller { 1731d649a7a8SEric Dumazet u32 in_flight, cwnd, halfcwnd; 1732c1b4a7e6SDavid S. Miller 1733c1b4a7e6SDavid S. Miller /* Don't be strict about the congestion window for the final FIN. */ 17344de075e0SEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 17354de075e0SEric Dumazet tcp_skb_pcount(skb) == 1) 1736c1b4a7e6SDavid S. Miller return 1; 1737c1b4a7e6SDavid S. Miller 1738c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1739c1b4a7e6SDavid S. Miller cwnd = tp->snd_cwnd; 1740d649a7a8SEric Dumazet if (in_flight >= cwnd) 1741c1b4a7e6SDavid S. Miller return 0; 1742d649a7a8SEric Dumazet 1743d649a7a8SEric Dumazet /* For better scheduling, ensure we have at least 1744d649a7a8SEric Dumazet * 2 GSO packets in flight. 1745d649a7a8SEric Dumazet */ 1746d649a7a8SEric Dumazet halfcwnd = max(cwnd >> 1, 1U); 1747d649a7a8SEric Dumazet return min(halfcwnd, cwnd - in_flight); 1748c1b4a7e6SDavid S. Miller } 1749c1b4a7e6SDavid S. Miller 1750b595076aSUwe Kleine-König /* Initialize TSO state of a skb. 175167edfef7SAndi Kleen * This must be invoked the first time we consider transmitting 1752c1b4a7e6SDavid S. Miller * SKB onto the wire. 1753c1b4a7e6SDavid S. Miller */ 17545bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1755c1b4a7e6SDavid S. Miller { 1756c1b4a7e6SDavid S. Miller int tso_segs = tcp_skb_pcount(skb); 1757c1b4a7e6SDavid S. Miller 1758f8269a49SIlpo Järvinen if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 17595bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 1760c1b4a7e6SDavid S. Miller tso_segs = tcp_skb_pcount(skb); 1761c1b4a7e6SDavid S. Miller } 1762c1b4a7e6SDavid S. Miller return tso_segs; 1763c1b4a7e6SDavid S. Miller } 1764c1b4a7e6SDavid S. Miller 1765c1b4a7e6SDavid S. Miller 1766a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be 1767c1b4a7e6SDavid S. Miller * sent now. 1768c1b4a7e6SDavid S. 
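/*
 * Illustrative sketch, not part of the original file: the quota
 * computation in tcp_cwnd_test() above. The result is capped at half the
 * cwnd so that, per the comment there, at least 2 GSO packets can stay
 * in flight for better scheduling. Hypothetical values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 10, in_flight = 4;
	unsigned int halfcwnd = (snd_cwnd >> 1) ? (snd_cwnd >> 1) : 1;
	unsigned int quota = snd_cwnd - in_flight;	/* 6 */

	if (quota > halfcwnd)
		quota = halfcwnd;			/* capped at 5 */
	printf("quota = %u\n", quota);
	return 0;
}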
Miller */ 1769a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1770c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1771c1b4a7e6SDavid S. Miller { 1772c1b4a7e6SDavid S. Miller /* Nagle rule does not apply to frames, which sit in the middle of the 1773c1b4a7e6SDavid S. Miller * write_queue (they have no chances to get new data). 1774c1b4a7e6SDavid S. Miller * 1775c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 1776c1b4a7e6SDavid S. Miller * argument based upon the location of SKB in the send queue. 1777c1b4a7e6SDavid S. Miller */ 1778c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 1779a2a385d6SEric Dumazet return true; 1780c1b4a7e6SDavid S. Miller 17819b44190dSYuchung Cheng /* Don't use the nagle rule for urgent data (or for the final FIN). */ 17829b44190dSYuchung Cheng if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1783a2a385d6SEric Dumazet return true; 1784c1b4a7e6SDavid S. Miller 1785cc93fc51SPeter Pan(潘卫平) if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) 1786a2a385d6SEric Dumazet return true; 1787c1b4a7e6SDavid S. Miller 1788a2a385d6SEric Dumazet return false; 1789c1b4a7e6SDavid S. Miller } 1790c1b4a7e6SDavid S. Miller 1791c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 1792a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 1793a2a385d6SEric Dumazet const struct sk_buff *skb, 1794056834d9SIlpo Järvinen unsigned int cur_mss) 1795c1b4a7e6SDavid S. Miller { 1796c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1797c1b4a7e6SDavid S. Miller 1798c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 1799c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1800c1b4a7e6SDavid S. Miller 180190840defSIlpo Järvinen return !after(end_seq, tcp_wnd_end(tp)); 1802c1b4a7e6SDavid S. Miller } 1803c1b4a7e6SDavid S. Miller 1804fe067e8aSDavid S. Miller /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1805c1b4a7e6SDavid S. Miller * should be put on the wire right now. If so, it returns the number of 1806c1b4a7e6SDavid S. Miller * packets allowed by the congestion window. 1807c1b4a7e6SDavid S. Miller */ 1808cf533ea5SEric Dumazet static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1809c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1810c1b4a7e6SDavid S. Miller { 1811cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1812c1b4a7e6SDavid S. Miller unsigned int cwnd_quota; 1813c1b4a7e6SDavid S. Miller 18145bbb432cSEric Dumazet tcp_init_tso_segs(skb, cur_mss); 1815c1b4a7e6SDavid S. Miller 1816c1b4a7e6SDavid S. Miller if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1817c1b4a7e6SDavid S. Miller return 0; 1818c1b4a7e6SDavid S. Miller 1819c1b4a7e6SDavid S. Miller cwnd_quota = tcp_cwnd_test(tp, skb); 1820056834d9SIlpo Järvinen if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1821c1b4a7e6SDavid S. Miller cwnd_quota = 0; 1822c1b4a7e6SDavid S. Miller 1823c1b4a7e6SDavid S. Miller return cwnd_quota; 1824c1b4a7e6SDavid S. Miller } 1825c1b4a7e6SDavid S. Miller 182667edfef7SAndi Kleen /* Test if sending is allowed right now. */ 1827a2a385d6SEric Dumazet bool tcp_may_send_now(struct sock *sk) 1828c1b4a7e6SDavid S. Miller { 1829cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1830fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 1831c1b4a7e6SDavid S. 
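/*
 * Illustrative sketch, not part of the original file: the window check
 * of tcp_snd_wnd_test() above. Only the first cur_mss bytes of the skb
 * must fit inside the advertised window; after() is the usual wrap-safe
 * sequence comparison. Hypothetical sequence numbers:
 */
#include <stdio.h>
#include <stdint.h>

static int after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) > 0;
}

int main(void)
{
	uint32_t seq = 5000, end_seq = 9000, cur_mss = 1448;
	uint32_t wnd_end = 7000;		/* snd_una + snd_wnd */

	if (end_seq - seq > cur_mss)		/* skb->len > cur_mss */
		end_seq = seq + cur_mss;	/* test only the first MSS */
	printf("%s\n", after(end_seq, wnd_end) ? "blocked" : "sendable");
	return 0;
}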
Miller 1832a02cec21SEric Dumazet return skb && 18330c54b85fSIlpo Järvinen tcp_snd_test(sk, skb, tcp_current_mss(sk), 1834c1b4a7e6SDavid S. Miller (tcp_skb_is_last(sk, skb) ? 1835a02cec21SEric Dumazet tp->nonagle : TCP_NAGLE_PUSH)); 1836c1b4a7e6SDavid S. Miller } 1837c1b4a7e6SDavid S. Miller 1838c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1839c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 1840c1b4a7e6SDavid S. Miller * tcp_fragment() except that it may make several kinds of assumptions 1841c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 1842c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 1843c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 1844c1b4a7e6SDavid S. Miller */ 1845056834d9SIlpo Järvinen static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1846c4ead4c5SEric Dumazet unsigned int mss_now, gfp_t gfp) 1847c1b4a7e6SDavid S. Miller { 1848c1b4a7e6SDavid S. Miller struct sk_buff *buff; 1849c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 18509ce01461SIlpo Järvinen u8 flags; 1851c1b4a7e6SDavid S. Miller 1852c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 1853c8ac3774SHerbert Xu if (skb->len != skb->data_len) 18546cc55e09SOctavian Purdila return tcp_fragment(sk, skb, len, mss_now, gfp); 1855c1b4a7e6SDavid S. Miller 1856eb934478SEric Dumazet buff = sk_stream_alloc_skb(sk, 0, gfp, true); 185751456b29SIan Morris if (unlikely(!buff)) 1858c1b4a7e6SDavid S. Miller return -ENOMEM; 1859c1b4a7e6SDavid S. Miller 18603ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 18613ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1862b60b49eaSHerbert Xu buff->truesize += nlen; 1863c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 1864c1b4a7e6SDavid S. Miller 1865c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 1866c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1867c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1868c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1869c1b4a7e6SDavid S. Miller 1870c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 18714de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 18724de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 18734de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1874c1b4a7e6SDavid S. Miller 1875c1b4a7e6SDavid S. Miller /* This packet was never sent out yet, so no SACK bits. */ 1876c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->sacked = 0; 1877c1b4a7e6SDavid S. Miller 1878a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 1879a166140eSMartin KaFai Lau 188084fa7933SPatrick McHardy buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1881c1b4a7e6SDavid S. Miller skb_split(skb, buff, len); 1882490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 1883c1b4a7e6SDavid S. Miller 1884c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 18855bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 18865bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 1887c1b4a7e6SDavid S. Miller 1888c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 1889f4a775d1SEric Dumazet __skb_header_release(buff); 1890fe067e8aSDavid S. 
Miller tcp_insert_write_queue_after(skb, buff, sk); 1891c1b4a7e6SDavid S. Miller 1892c1b4a7e6SDavid S. Miller return 0; 1893c1b4a7e6SDavid S. Miller } 1894c1b4a7e6SDavid S. Miller 1895c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount 1896c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 1897c1b4a7e6SDavid S. Miller * 1898c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 1899c1b4a7e6SDavid S. Miller */ 1900ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, 1901605ad7f1SEric Dumazet bool *is_cwnd_limited, u32 max_segs) 1902c1b4a7e6SDavid S. Miller { 19036687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 190450c8339eSEric Dumazet u32 age, send_win, cong_win, limit, in_flight; 190550c8339eSEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 190650c8339eSEric Dumazet struct sk_buff *head; 1907ad9f4f50SEric Dumazet int win_divisor; 1908c1b4a7e6SDavid S. Miller 19094de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1910ae8064acSJohn Heffner goto send_now; 1911c1b4a7e6SDavid S. Miller 191299d7662aSEric Dumazet if (icsk->icsk_ca_state >= TCP_CA_Recovery) 1913ae8064acSJohn Heffner goto send_now; 1914ae8064acSJohn Heffner 19155f852eb5SEric Dumazet /* Avoid bursty behavior by allowing defer 19165f852eb5SEric Dumazet * only if the last write was recent. 19175f852eb5SEric Dumazet */ 1918d635fbe2SEric Dumazet if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0) 1919ae8064acSJohn Heffner goto send_now; 1920908a75c1SDavid S. Miller 1921c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1922c1b4a7e6SDavid S. Miller 1923056834d9SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1924c1b4a7e6SDavid S. Miller 192590840defSIlpo Järvinen send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1926c1b4a7e6SDavid S. Miller 1927c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1928c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1929c1b4a7e6SDavid S. Miller 1930c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1931c1b4a7e6SDavid S. Miller 1932ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 1933605ad7f1SEric Dumazet if (limit >= max_segs * tp->mss_cache) 1934ae8064acSJohn Heffner goto send_now; 1935ba244fe9SDavid S. Miller 193662ad2761SIlpo Järvinen /* Middle in queue won't get any more data, full sendable already? */ 193762ad2761SIlpo Järvinen if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 193862ad2761SIlpo Järvinen goto send_now; 193962ad2761SIlpo Järvinen 1940ad9f4f50SEric Dumazet win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1941ad9f4f50SEric Dumazet if (win_divisor) { 1942c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1943c1b4a7e6SDavid S. Miller 1944c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1945c1b4a7e6SDavid S. Miller * just use it. 1946c1b4a7e6SDavid S. Miller */ 1947ad9f4f50SEric Dumazet chunk /= win_divisor; 1948c1b4a7e6SDavid S. Miller if (limit >= chunk) 1949ae8064acSJohn Heffner goto send_now; 1950c1b4a7e6SDavid S. Miller } else { 1951c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1952c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1953c1b4a7e6SDavid S. 
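/*
 * Illustrative sketch, not part of the original file: the
 * tso_win_divisor branch of tcp_tso_should_defer() above. With the
 * default divisor of 3, deferral stops once the sendable amount reaches
 * a third of the smaller of receive and congestion windows.
 * Hypothetical values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int snd_wnd = 65535, snd_cwnd = 20, mss_cache = 1448;
	unsigned int send_win = 30000, cong_win = 14480, win_divisor = 3;
	unsigned int limit = send_win < cong_win ? send_win : cong_win;
	unsigned int chunk = snd_cwnd * mss_cache;	/* 28960 */

	if (chunk > snd_wnd)
		chunk = snd_wnd;
	chunk /= win_divisor;				/* 9653 */
	printf("%s\n", limit >= chunk ? "send now" : "defer");
	return 0;
}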
Miller * frame, so if we have space for more than 3 frames 1954c1b4a7e6SDavid S. Miller * then send now. 1955c1b4a7e6SDavid S. Miller */ 19566b5a5c0dSNeal Cardwell if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1957ae8064acSJohn Heffner goto send_now; 1958c1b4a7e6SDavid S. Miller } 1959c1b4a7e6SDavid S. Miller 196050c8339eSEric Dumazet head = tcp_write_queue_head(sk); 1961385e2070SEric Dumazet 19629a568de4SEric Dumazet age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); 196350c8339eSEric Dumazet /* If next ACK is likely to come too late (half srtt), do not defer */ 196450c8339eSEric Dumazet if (age < (tp->srtt_us >> 4)) 196550c8339eSEric Dumazet goto send_now; 196650c8339eSEric Dumazet 19675f852eb5SEric Dumazet /* Ok, it looks like it is advisable to defer. */ 1968ae8064acSJohn Heffner 1969d2e1339fSBendik Rønning Opstad if (cong_win < send_win && cong_win <= skb->len) 1970ca8a2263SNeal Cardwell *is_cwnd_limited = true; 1971ca8a2263SNeal Cardwell 1972a2a385d6SEric Dumazet return true; 1973ae8064acSJohn Heffner 1974ae8064acSJohn Heffner send_now: 1975a2a385d6SEric Dumazet return false; 1976c1b4a7e6SDavid S. Miller } 1977c1b4a7e6SDavid S. Miller 197805cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk) 197905cbc0dbSFan Du { 198005cbc0dbSFan Du struct inet_connection_sock *icsk = inet_csk(sk); 198105cbc0dbSFan Du struct tcp_sock *tp = tcp_sk(sk); 198205cbc0dbSFan Du struct net *net = sock_net(sk); 198305cbc0dbSFan Du u32 interval; 198405cbc0dbSFan Du s32 delta; 198505cbc0dbSFan Du 198605cbc0dbSFan Du interval = net->ipv4.sysctl_tcp_probe_interval; 1987c74df29aSEric Dumazet delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; 198805cbc0dbSFan Du if (unlikely(delta >= interval * HZ)) { 198905cbc0dbSFan Du int mss = tcp_current_mss(sk); 199005cbc0dbSFan Du 199105cbc0dbSFan Du /* Update current search range */ 199205cbc0dbSFan Du icsk->icsk_mtup.probe_size = 0; 199305cbc0dbSFan Du icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + 199405cbc0dbSFan Du sizeof(struct tcphdr) + 199505cbc0dbSFan Du icsk->icsk_af_ops->net_header_len; 199605cbc0dbSFan Du icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 199705cbc0dbSFan Du 199805cbc0dbSFan Du /* Update probe time stamp */ 1999c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 200005cbc0dbSFan Du } 200105cbc0dbSFan Du } 200205cbc0dbSFan Du 20035d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 200467edfef7SAndi Kleen * MTU probe is regularly attempting to increase the path MTU by 200567edfef7SAndi Kleen * deliberately sending larger packets. This discovers routing 200667edfef7SAndi Kleen * changes resulting in larger path MTUs. 
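/*
 * Illustrative sketch, not part of the original file: the probe sizing
 * used by tcp_mtu_probe() below. It tries the midpoint of the current
 * MTU search range and requires enough queued data behind the probe.
 * Hypothetical IPv4 values, no TCP options:
 */
#include <stdio.h>

int main(void)
{
	int search_low = 1024, search_high = 1500;	/* MTU search range */
	int reordering = 3, mss_cache = 1448;
	int probe_mtu = (search_high + search_low) >> 1;	/* 1262 */
	int probe_size = probe_mtu - 20 - 20;	/* mtu_to_mss, IPv4, no opts */
	int size_needed = probe_size + (reordering + 1) * mss_cache;

	printf("probe %d bytes, need %d queued\n", probe_size, size_needed);
	return 0;
}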
200767edfef7SAndi Kleen *
20085d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available),
20095d424d5aSJohn Heffner * 1 if a probe was sent,
2010056834d9SIlpo Järvinen * -1 otherwise
2011056834d9SIlpo Järvinen */
20125d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20135d424d5aSJohn Heffner {
20145d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
201512a59abcSEric Dumazet struct tcp_sock *tp = tcp_sk(sk);
20165d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next;
20176b58e0a5SFan Du struct net *net = sock_net(sk);
20185d424d5aSJohn Heffner int probe_size;
201991cc17c0SIlpo Järvinen int size_needed;
202012a59abcSEric Dumazet int copy, len;
20215d424d5aSJohn Heffner int mss_now;
20226b58e0a5SFan Du int interval;
20235d424d5aSJohn Heffner
20245d424d5aSJohn Heffner /* Not currently probing/verifying,
20255d424d5aSJohn Heffner * not in recovery,
20265d424d5aSJohn Heffner * have enough cwnd, and
202712a59abcSEric Dumazet * not SACKing (the variable headers throw things off)
202812a59abcSEric Dumazet */
202912a59abcSEric Dumazet if (likely(!icsk->icsk_mtup.enabled ||
20305d424d5aSJohn Heffner icsk->icsk_mtup.probe_size ||
20315d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
20325d424d5aSJohn Heffner tp->snd_cwnd < 11 ||
203312a59abcSEric Dumazet tp->rx_opt.num_sacks || tp->rx_opt.dsack))
20345d424d5aSJohn Heffner return -1;
20355d424d5aSJohn Heffner
20366b58e0a5SFan Du /* Use binary search for probe_size between tcp_base_mss
20376b58e0a5SFan Du * and the current mss_clamp. If (search_high - search_low)
20386b58e0a5SFan Du * is smaller than a threshold, back off from probing.
20396b58e0a5SFan Du */
20400c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk);
20416b58e0a5SFan Du probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
20426b58e0a5SFan Du icsk->icsk_mtup.search_low) >> 1);
204391cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
20446b58e0a5SFan Du interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
204505cbc0dbSFan Du /* When misfortune happens, we are reprobing actively,
204605cbc0dbSFan Du * and then reprobe timer has expired. We stick with current
204705cbc0dbSFan Du * probing process by not resetting search range to its original.
204805cbc0dbSFan Du */
20496b58e0a5SFan Du if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
205005cbc0dbSFan Du interval < net->ipv4.sysctl_tcp_probe_threshold) {
205105cbc0dbSFan Du /* Check whether enough time has elapsed for
205205cbc0dbSFan Du * another round of probing.
205305cbc0dbSFan Du */
205405cbc0dbSFan Du tcp_mtu_check_reprobe(sk);
20555d424d5aSJohn Heffner return -1;
20565d424d5aSJohn Heffner }
20575d424d5aSJohn Heffner
20585d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */
20597f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed)
20605d424d5aSJohn Heffner return -1;
20615d424d5aSJohn Heffner
206291cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed)
20635d424d5aSJohn Heffner return -1;
206490840defSIlpo Järvinen if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
20655d424d5aSJohn Heffner return 0;
20665d424d5aSJohn Heffner
2067d67c58e9SIlpo Järvinen /* Do we need to wait to drain cwnd?
With none in flight, don't stall */ 2068d67c58e9SIlpo Järvinen if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 2069d67c58e9SIlpo Järvinen if (!tcp_packets_in_flight(tp)) 20705d424d5aSJohn Heffner return -1; 20715d424d5aSJohn Heffner else 20725d424d5aSJohn Heffner return 0; 20735d424d5aSJohn Heffner } 20745d424d5aSJohn Heffner 20755d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */ 2076eb934478SEric Dumazet nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); 207751456b29SIan Morris if (!nskb) 20785d424d5aSJohn Heffner return -1; 20793ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 20803ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 20815d424d5aSJohn Heffner 2082fe067e8aSDavid S. Miller skb = tcp_send_head(sk); 20835d424d5aSJohn Heffner 20845d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 20855d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 20864de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 20875d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 20885d424d5aSJohn Heffner nskb->csum = 0; 208984fa7933SPatrick McHardy nskb->ip_summed = skb->ip_summed; 20905d424d5aSJohn Heffner 209150c4817eSIlpo Järvinen tcp_insert_write_queue_before(nskb, skb, sk); 209250c4817eSIlpo Järvinen 20935d424d5aSJohn Heffner len = 0; 2094234b6860SIlpo Järvinen tcp_for_write_queue_from_safe(skb, next, sk) { 20955d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 20962fe664f1SDouglas Caetano dos Santos if (nskb->ip_summed) { 20975d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 20982fe664f1SDouglas Caetano dos Santos } else { 20992fe664f1SDouglas Caetano dos Santos __wsum csum = skb_copy_and_csum_bits(skb, 0, 2100056834d9SIlpo Järvinen skb_put(nskb, copy), 21012fe664f1SDouglas Caetano dos Santos copy, 0); 21022fe664f1SDouglas Caetano dos Santos nskb->csum = csum_block_add(nskb->csum, csum, len); 21032fe664f1SDouglas Caetano dos Santos } 21045d424d5aSJohn Heffner 21055d424d5aSJohn Heffner if (skb->len <= copy) { 21065d424d5aSJohn Heffner /* We've eaten all the data from this skb. 21075d424d5aSJohn Heffner * Throw it away. */ 21084de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2109fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 21103ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 21115d424d5aSJohn Heffner } else { 21124de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 2113a3433f35SChangli Gao ~(TCPHDR_FIN|TCPHDR_PSH); 21145d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 21155d424d5aSJohn Heffner skb_pull(skb, copy); 211684fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 2117056834d9SIlpo Järvinen skb->csum = csum_partial(skb->data, 2118056834d9SIlpo Järvinen skb->len, 0); 21195d424d5aSJohn Heffner } else { 21205d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 21215bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 21225d424d5aSJohn Heffner } 21235d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 21245d424d5aSJohn Heffner } 21255d424d5aSJohn Heffner 21265d424d5aSJohn Heffner len += copy; 2127234b6860SIlpo Järvinen 2128234b6860SIlpo Järvinen if (len >= probe_size) 2129234b6860SIlpo Järvinen break; 21305d424d5aSJohn Heffner } 21315bbb432cSEric Dumazet tcp_init_tso_segs(nskb, nskb->len); 21325d424d5aSJohn Heffner 21335d424d5aSJohn Heffner /* We're ready to send. 
If this fails, the probe will 21347faee5c0SEric Dumazet * be resegmented into mss-sized pieces by tcp_write_xmit(). 21357faee5c0SEric Dumazet */ 21365d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 21375d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 21385d424d5aSJohn Heffner * effectively two packets. */ 21395d424d5aSJohn Heffner tp->snd_cwnd--; 214066f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, nskb); 21415d424d5aSJohn Heffner 21425d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 21430e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 21440e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 21455d424d5aSJohn Heffner 21465d424d5aSJohn Heffner return 1; 21475d424d5aSJohn Heffner } 21485d424d5aSJohn Heffner 21495d424d5aSJohn Heffner return -1; 21505d424d5aSJohn Heffner } 21515d424d5aSJohn Heffner 2152218af599SEric Dumazet static bool tcp_pacing_check(const struct sock *sk) 2153218af599SEric Dumazet { 2154218af599SEric Dumazet return tcp_needs_internal_pacing(sk) && 2155218af599SEric Dumazet hrtimer_active(&tcp_sk(sk)->pacing_timer); 2156218af599SEric Dumazet } 2157218af599SEric Dumazet 2158f9616c35SEric Dumazet /* TCP Small Queues : 2159f9616c35SEric Dumazet * Control number of packets in qdisc/devices to two packets / or ~1 ms. 2160f9616c35SEric Dumazet * (These limits are doubled for retransmits) 2161f9616c35SEric Dumazet * This allows for : 2162f9616c35SEric Dumazet * - better RTT estimation and ACK scheduling 2163f9616c35SEric Dumazet * - faster recovery 2164f9616c35SEric Dumazet * - high rates 2165f9616c35SEric Dumazet * Alas, some drivers / subsystems require a fair amount 2166f9616c35SEric Dumazet * of queued bytes to ensure line rate. 2167f9616c35SEric Dumazet * One example is wifi aggregation (802.11 AMPDU) 2168f9616c35SEric Dumazet */ 2169f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, 2170f9616c35SEric Dumazet unsigned int factor) 2171f9616c35SEric Dumazet { 2172f9616c35SEric Dumazet unsigned int limit; 2173f9616c35SEric Dumazet 2174f9616c35SEric Dumazet limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); 2175f9616c35SEric Dumazet limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes); 2176f9616c35SEric Dumazet limit <<= factor; 2177f9616c35SEric Dumazet 217814afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > limit) { 217975eefc6cSEric Dumazet /* Always send the 1st or 2nd skb in write queue. 218075eefc6cSEric Dumazet * No need to wait for TX completion to call us back, 218175eefc6cSEric Dumazet * after softirq/tasklet schedule. 218275eefc6cSEric Dumazet * This helps when TX completions are delayed too much. 218375eefc6cSEric Dumazet */ 218475eefc6cSEric Dumazet if (skb == sk->sk_write_queue.next || 218575eefc6cSEric Dumazet skb->prev == sk->sk_write_queue.next) 218675eefc6cSEric Dumazet return false; 218775eefc6cSEric Dumazet 21887aa5470cSEric Dumazet set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 2189f9616c35SEric Dumazet /* It is possible TX completion already happened 2190f9616c35SEric Dumazet * before we set TSQ_THROTTLED, so we must 2191f9616c35SEric Dumazet * test again the condition. 
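/*
 * Illustrative sketch, not part of the original file: the byte limit
 * computed by tcp_small_queue_check() above. Assuming a 100 Mbit/s
 * pacing rate (12,500,000 bytes/sec) and the 262144-byte sysctl default:
 */
#include <stdio.h>

int main(void)
{
	unsigned long sk_pacing_rate = 12500000UL;	/* bytes per second */
	unsigned int skb_truesize = 2304, factor = 0;	/* factor 1 for rtx */
	unsigned long limit = sk_pacing_rate >> 10;	/* ~1 ms: 12207 */

	if (limit < 2UL * skb_truesize)
		limit = 2UL * skb_truesize;
	if (limit > 262144UL)
		limit = 262144UL;	/* sysctl_tcp_limit_output_bytes */
	limit <<= factor;
	printf("limit = %lu bytes\n", limit);		/* 12207 */
	return 0;
}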
2192f9616c35SEric Dumazet */ 2193f9616c35SEric Dumazet smp_mb__after_atomic(); 219414afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > limit) 2195f9616c35SEric Dumazet return true; 2196f9616c35SEric Dumazet } 2197f9616c35SEric Dumazet return false; 2198f9616c35SEric Dumazet } 2199f9616c35SEric Dumazet 220005b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) 220105b055e8SFrancis Yan { 2202628174ccSEric Dumazet const u32 now = tcp_jiffies32; 220305b055e8SFrancis Yan 220405b055e8SFrancis Yan if (tp->chrono_type > TCP_CHRONO_UNSPEC) 220505b055e8SFrancis Yan tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start; 220605b055e8SFrancis Yan tp->chrono_start = now; 220705b055e8SFrancis Yan tp->chrono_type = new; 220805b055e8SFrancis Yan } 220905b055e8SFrancis Yan 221005b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) 221105b055e8SFrancis Yan { 221205b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk); 221305b055e8SFrancis Yan 221405b055e8SFrancis Yan /* If there are multiple conditions worthy of tracking in a 22150f87230dSFrancis Yan * chronograph then the highest priority enum takes precedence 22160f87230dSFrancis Yan * over the other conditions. So that if something "more interesting" 221705b055e8SFrancis Yan * starts happening, stop the previous chrono and start a new one. 221805b055e8SFrancis Yan */ 221905b055e8SFrancis Yan if (type > tp->chrono_type) 222005b055e8SFrancis Yan tcp_chrono_set(tp, type); 222105b055e8SFrancis Yan } 222205b055e8SFrancis Yan 222305b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) 222405b055e8SFrancis Yan { 222505b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk); 222605b055e8SFrancis Yan 22270f87230dSFrancis Yan 22280f87230dSFrancis Yan /* There are multiple conditions worthy of tracking in a 22290f87230dSFrancis Yan * chronograph, so that the highest priority enum takes 22300f87230dSFrancis Yan * precedence over the other conditions (see tcp_chrono_start). 22310f87230dSFrancis Yan * If a condition stops, we only stop chrono tracking if 22320f87230dSFrancis Yan * it's the "most interesting" or current chrono we are 22330f87230dSFrancis Yan * tracking and starts busy chrono if we have pending data. 22340f87230dSFrancis Yan */ 22350f87230dSFrancis Yan if (tcp_write_queue_empty(sk)) 223605b055e8SFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); 22370f87230dSFrancis Yan else if (type == tp->chrono_type) 22380f87230dSFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_BUSY); 223905b055e8SFrancis Yan } 224005b055e8SFrancis Yan 22411da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 22421da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote 22431da177e4SLinus Torvalds * window for us. 22441da177e4SLinus Torvalds * 2245f8269a49SIlpo Järvinen * LARGESEND note: !tcp_urg_mode is overkill, only frames between 2246f8269a49SIlpo Järvinen * snd_up-64k-mss .. snd_up cannot be large. However, taking into 2247f8269a49SIlpo Järvinen * account rare use of URG, this is not a big flaw. 2248f8269a49SIlpo Järvinen * 22496ba8a3b1SNandita Dukkipati * Send at most one packet when push_one > 0. Temporarily ignore 22506ba8a3b1SNandita Dukkipati * cwnd limit to force at most one packet out when push_one == 2. 
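 * E.g. tcp_send_loss_probe() uses
 * tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC) so that one
 * probe segment may leave even when cwnd is fully used.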
22516ba8a3b1SNandita Dukkipati 2252a2a385d6SEric Dumazet * Returns true, if no segments are in flight and we have queued segments, 2253a2a385d6SEric Dumazet * but cannot send anything now because of SWS or another problem. 22541da177e4SLinus Torvalds */ 2255a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 2256d5dd9175SIlpo Järvinen int push_one, gfp_t gfp) 22571da177e4SLinus Torvalds { 22581da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 225992df7b51SDavid S. Miller struct sk_buff *skb; 2260c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 2261c1b4a7e6SDavid S. Miller int cwnd_quota; 22625d424d5aSJohn Heffner int result; 22635615f886SFrancis Yan bool is_cwnd_limited = false, is_rwnd_limited = false; 2264605ad7f1SEric Dumazet u32 max_segs; 22651da177e4SLinus Torvalds 2266c1b4a7e6SDavid S. Miller sent_pkts = 0; 22675d424d5aSJohn Heffner 2268d5dd9175SIlpo Järvinen if (!push_one) { 22695d424d5aSJohn Heffner /* Do MTU probing. */ 2270d5dd9175SIlpo Järvinen result = tcp_mtu_probe(sk); 2271d5dd9175SIlpo Järvinen if (!result) { 2272a2a385d6SEric Dumazet return false; 22735d424d5aSJohn Heffner } else if (result > 0) { 22745d424d5aSJohn Heffner sent_pkts = 1; 22755d424d5aSJohn Heffner } 2276d5dd9175SIlpo Järvinen } 22775d424d5aSJohn Heffner 2278ed6e7268SNeal Cardwell max_segs = tcp_tso_segs(sk, mss_now); 22799a568de4SEric Dumazet tcp_mstamp_refresh(tp); 2280fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 2281c8ac3774SHerbert Xu unsigned int limit; 2282c8ac3774SHerbert Xu 2283218af599SEric Dumazet if (tcp_pacing_check(sk)) 2284218af599SEric Dumazet break; 2285218af599SEric Dumazet 22865bbb432cSEric Dumazet tso_segs = tcp_init_tso_segs(skb, mss_now); 2287c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 2288c1b4a7e6SDavid S. Miller 22899d186cacSAndrey Vagin if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { 22907faee5c0SEric Dumazet /* "skb_mstamp" is used as a start point for the retransmit timer */ 2291385e2070SEric Dumazet skb->skb_mstamp = tp->tcp_mstamp; 2292ec342325SAndrew Vagin goto repair; /* Skip network transmission */ 22939d186cacSAndrey Vagin } 2294ec342325SAndrew Vagin 2295b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 22966ba8a3b1SNandita Dukkipati if (!cwnd_quota) { 22976ba8a3b1SNandita Dukkipati if (push_one == 2) 22986ba8a3b1SNandita Dukkipati /* Force out a loss probe pkt. */ 22996ba8a3b1SNandita Dukkipati cwnd_quota = 1; 23006ba8a3b1SNandita Dukkipati else 2301b68e9f85SHerbert Xu break; 23026ba8a3b1SNandita Dukkipati } 2303b68e9f85SHerbert Xu 23045615f886SFrancis Yan if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { 23055615f886SFrancis Yan is_rwnd_limited = true; 2306b68e9f85SHerbert Xu break; 23075615f886SFrancis Yan } 2308b68e9f85SHerbert Xu 2309d6a4e26aSEric Dumazet if (tso_segs == 1) { 2310aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2311aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 2312aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 2313aa93466bSDavid S. Miller break; 2314c1b4a7e6SDavid S. Miller } else { 2315ca8a2263SNeal Cardwell if (!push_one && 2316605ad7f1SEric Dumazet tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 2317605ad7f1SEric Dumazet max_segs)) 2318aa93466bSDavid S. Miller break; 2319c1b4a7e6SDavid S. Miller } 2320aa93466bSDavid S. 
Miller 2321605ad7f1SEric Dumazet limit = mss_now; 2322d6a4e26aSEric Dumazet if (tso_segs > 1 && !tcp_urg_mode(tp)) 2323605ad7f1SEric Dumazet limit = tcp_mss_split_point(sk, skb, mss_now, 2324605ad7f1SEric Dumazet min_t(unsigned int, 2325605ad7f1SEric Dumazet cwnd_quota, 2326605ad7f1SEric Dumazet max_segs), 2327605ad7f1SEric Dumazet nonagle); 2328605ad7f1SEric Dumazet 2329605ad7f1SEric Dumazet if (skb->len > limit && 2330605ad7f1SEric Dumazet unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2331605ad7f1SEric Dumazet break; 2332605ad7f1SEric Dumazet 23337aa5470cSEric Dumazet if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) 23347aa5470cSEric Dumazet clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags); 2335f9616c35SEric Dumazet if (tcp_small_queue_check(sk, skb, 0)) 233646d3ceabSEric Dumazet break; 2337c9eeec26SEric Dumazet 2338d5dd9175SIlpo Järvinen if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 23391da177e4SLinus Torvalds break; 23401da177e4SLinus Torvalds 2341ec342325SAndrew Vagin repair: 23421da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 23431da177e4SLinus Torvalds * This call will increment packets_out. 23441da177e4SLinus Torvalds */ 234566f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 23461da177e4SLinus Torvalds 23471da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 2348a262f0cdSNandita Dukkipati sent_pkts += tcp_skb_pcount(skb); 2349d5dd9175SIlpo Järvinen 2350d5dd9175SIlpo Järvinen if (push_one) 2351d5dd9175SIlpo Järvinen break; 23521da177e4SLinus Torvalds } 23531da177e4SLinus Torvalds 23545615f886SFrancis Yan if (is_rwnd_limited) 23555615f886SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); 23565615f886SFrancis Yan else 23575615f886SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 23585615f886SFrancis Yan 2359aa93466bSDavid S. Miller if (likely(sent_pkts)) { 2360684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 2361684bad11SYuchung Cheng tp->prr_out += sent_pkts; 23626ba8a3b1SNandita Dukkipati 23636ba8a3b1SNandita Dukkipati /* Send one loss probe per tail loss episode. */ 23646ba8a3b1SNandita Dukkipati if (push_one != 2) 23656ba8a3b1SNandita Dukkipati tcp_schedule_loss_probe(sk); 2366d2e1339fSBendik Rønning Opstad is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2367ca8a2263SNeal Cardwell tcp_cwnd_validate(sk, is_cwnd_limited); 2368a2a385d6SEric Dumazet return false; 23691da177e4SLinus Torvalds } 2370b340b264SYuchung Cheng return !tp->packets_out && tcp_send_head(sk); 23716ba8a3b1SNandita Dukkipati } 23726ba8a3b1SNandita Dukkipati 23736ba8a3b1SNandita Dukkipati bool tcp_schedule_loss_probe(struct sock *sk) 23746ba8a3b1SNandita Dukkipati { 23756ba8a3b1SNandita Dukkipati struct inet_connection_sock *icsk = inet_csk(sk); 23766ba8a3b1SNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 23776ba8a3b1SNandita Dukkipati u32 timeout, tlp_time_stamp, rto_time_stamp; 2378740b0f18SEric Dumazet u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 23796ba8a3b1SNandita Dukkipati 23806ba8a3b1SNandita Dukkipati /* No consecutive loss probes. */ 23816ba8a3b1SNandita Dukkipati if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 23826ba8a3b1SNandita Dukkipati tcp_rearm_rto(sk); 23836ba8a3b1SNandita Dukkipati return false; 23846ba8a3b1SNandita Dukkipati } 23856ba8a3b1SNandita Dukkipati /* Don't do any loss probe on a Fast Open connection before 3WHS 23866ba8a3b1SNandita Dukkipati * finishes. 
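 * (tp->fastopen_rsk stays set while the TFO handshake is incomplete,
 * i.e. data carried on the SYN may still be unacknowledged, so srtt
 * and packets_out do not yet describe an established flow.)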
23876ba8a3b1SNandita Dukkipati */ 2388f9b99582SYuchung Cheng if (tp->fastopen_rsk) 23896ba8a3b1SNandita Dukkipati return false; 23906ba8a3b1SNandita Dukkipati 23916ba8a3b1SNandita Dukkipati /* TLP is only scheduled when next timer event is RTO. */ 23926ba8a3b1SNandita Dukkipati if (icsk->icsk_pending != ICSK_TIME_RETRANS) 23936ba8a3b1SNandita Dukkipati return false; 23946ba8a3b1SNandita Dukkipati 23956ba8a3b1SNandita Dukkipati /* Schedule a loss probe in 2*RTT for SACK capable connections 23966ba8a3b1SNandita Dukkipati * in Open state, that are either limited by cwnd or application. 23976ba8a3b1SNandita Dukkipati */ 2398bec41a11SYuchung Cheng if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) || 2399bec41a11SYuchung Cheng !tp->packets_out || !tcp_is_sack(tp) || 2400bec41a11SYuchung Cheng icsk->icsk_ca_state != TCP_CA_Open) 24016ba8a3b1SNandita Dukkipati return false; 24026ba8a3b1SNandita Dukkipati 24036ba8a3b1SNandita Dukkipati if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 24046ba8a3b1SNandita Dukkipati tcp_send_head(sk)) 24056ba8a3b1SNandita Dukkipati return false; 24066ba8a3b1SNandita Dukkipati 24076ba8a3b1SNandita Dukkipati /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 2408f9b99582SYuchung Cheng * for delayed ack when there's one outstanding packet. If no RTT 2409f9b99582SYuchung Cheng * sample is available then probe after TCP_TIMEOUT_INIT. 24106ba8a3b1SNandita Dukkipati */ 2411f9b99582SYuchung Cheng timeout = rtt << 1 ? : TCP_TIMEOUT_INIT; 24126ba8a3b1SNandita Dukkipati if (tp->packets_out == 1) 24136ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, 24146ba8a3b1SNandita Dukkipati (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 24156ba8a3b1SNandita Dukkipati timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 24166ba8a3b1SNandita Dukkipati 24176ba8a3b1SNandita Dukkipati /* If RTO is shorter, just schedule TLP in its place. */ 2418ac9517fcSEric Dumazet tlp_time_stamp = tcp_jiffies32 + timeout; 24196ba8a3b1SNandita Dukkipati rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 24206ba8a3b1SNandita Dukkipati if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 2421ac9517fcSEric Dumazet s32 delta = rto_time_stamp - tcp_jiffies32; 24226ba8a3b1SNandita Dukkipati if (delta > 0) 24236ba8a3b1SNandita Dukkipati timeout = delta; 24246ba8a3b1SNandita Dukkipati } 24256ba8a3b1SNandita Dukkipati 24266ba8a3b1SNandita Dukkipati inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 24276ba8a3b1SNandita Dukkipati TCP_RTO_MAX); 24286ba8a3b1SNandita Dukkipati return true; 24296ba8a3b1SNandita Dukkipati } 24306ba8a3b1SNandita Dukkipati 24311f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of 24321f3279aeSEric Dumazet * a packet is still in a qdisc or driver queue. 24331f3279aeSEric Dumazet * In this case, there is very little point doing a retransmit ! 
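 * The clone still sitting in a lower queue will reach the wire by
 * itself; retransmitting now would merely line up a duplicate
 * behind it.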
24341f3279aeSEric Dumazet */
24351f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
24361f3279aeSEric Dumazet const struct sk_buff *skb)
24371f3279aeSEric Dumazet {
243839bb5e62SEric Dumazet if (unlikely(skb_fclone_busy(sk, skb))) {
2439c10d9310SEric Dumazet NET_INC_STATS(sock_net(sk),
24401f3279aeSEric Dumazet LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
24411f3279aeSEric Dumazet return true;
24421f3279aeSEric Dumazet }
24431f3279aeSEric Dumazet return false;
24441f3279aeSEric Dumazet }
24451f3279aeSEric Dumazet
2446b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
24476ba8a3b1SNandita Dukkipati * retransmit the last segment.
24486ba8a3b1SNandita Dukkipati */
24496ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
24506ba8a3b1SNandita Dukkipati {
24519b717a8dSNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk);
24526ba8a3b1SNandita Dukkipati struct sk_buff *skb;
24536ba8a3b1SNandita Dukkipati int pcount;
24546ba8a3b1SNandita Dukkipati int mss = tcp_current_mss(sk);
24556ba8a3b1SNandita Dukkipati
2456b340b264SYuchung Cheng skb = tcp_send_head(sk);
2457b340b264SYuchung Cheng if (skb) {
2458b340b264SYuchung Cheng if (tcp_snd_wnd_test(tp, skb, mss)) {
2459b340b264SYuchung Cheng pcount = tp->packets_out;
2460b340b264SYuchung Cheng tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2461b340b264SYuchung Cheng if (tp->packets_out > pcount)
2462b340b264SYuchung Cheng goto probe_sent;
24636ba8a3b1SNandita Dukkipati goto rearm_timer;
24646ba8a3b1SNandita Dukkipati }
2465b340b264SYuchung Cheng skb = tcp_write_queue_prev(sk, skb);
2466b340b264SYuchung Cheng } else {
2467b340b264SYuchung Cheng skb = tcp_write_queue_tail(sk);
2468b340b264SYuchung Cheng }
24696ba8a3b1SNandita Dukkipati
24709b717a8dSNandita Dukkipati /* At most one outstanding TLP retransmission. */
24719b717a8dSNandita Dukkipati if (tp->tlp_high_seq)
24729b717a8dSNandita Dukkipati goto rearm_timer;
24739b717a8dSNandita Dukkipati
24746ba8a3b1SNandita Dukkipati /* Retransmit last segment. */
24756ba8a3b1SNandita Dukkipati if (WARN_ON(!skb))
24766ba8a3b1SNandita Dukkipati goto rearm_timer;
24776ba8a3b1SNandita Dukkipati
24781f3279aeSEric Dumazet if (skb_still_in_host_queue(sk, skb))
24791f3279aeSEric Dumazet goto rearm_timer;
24801f3279aeSEric Dumazet
24816ba8a3b1SNandita Dukkipati pcount = tcp_skb_pcount(skb);
24826ba8a3b1SNandita Dukkipati if (WARN_ON(!pcount))
24836ba8a3b1SNandita Dukkipati goto rearm_timer;
24846ba8a3b1SNandita Dukkipati
24856ba8a3b1SNandita Dukkipati if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
24866cc55e09SOctavian Purdila if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
24876cc55e09SOctavian Purdila GFP_ATOMIC)))
24886ba8a3b1SNandita Dukkipati goto rearm_timer;
2489b340b264SYuchung Cheng skb = tcp_write_queue_next(sk, skb);
24906ba8a3b1SNandita Dukkipati }
24916ba8a3b1SNandita Dukkipati
24926ba8a3b1SNandita Dukkipati if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
24936ba8a3b1SNandita Dukkipati goto rearm_timer;
24946ba8a3b1SNandita Dukkipati
249510d3be56SEric Dumazet if (__tcp_retransmit_skb(sk, skb, 1))
2496b340b264SYuchung Cheng goto rearm_timer;
24976ba8a3b1SNandita Dukkipati
24989b717a8dSNandita Dukkipati /* Record snd_nxt for loss detection. */
24999b717a8dSNandita Dukkipati tp->tlp_high_seq = tp->snd_nxt;
25009b717a8dSNandita Dukkipati
2501b340b264SYuchung Cheng probe_sent:
2502c10d9310SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2503fcd16c0aSYuchung Cheng /* Reset s.t.
tcp_rearm_rto will restart timer from now */ 2504fcd16c0aSYuchung Cheng inet_csk(sk)->icsk_pending = 0; 2505b340b264SYuchung Cheng rearm_timer: 2506fcd16c0aSYuchung Cheng tcp_rearm_rto(sk); 25071da177e4SLinus Torvalds } 25081da177e4SLinus Torvalds 2509a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 2510a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 2511a762a980SDavid S. Miller * The socket must be locked by the caller. 2512a762a980SDavid S. Miller */ 25139e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 25149e412ba7SIlpo Järvinen int nonagle) 2515a762a980SDavid S. Miller { 2516726e07a8SIlpo Järvinen /* If we are closed, the bytes will have to remain here. 2517726e07a8SIlpo Järvinen * In time closedown will finish, we empty the write queue and 2518726e07a8SIlpo Järvinen * all will be happy. 2519726e07a8SIlpo Järvinen */ 2520726e07a8SIlpo Järvinen if (unlikely(sk->sk_state == TCP_CLOSE)) 2521726e07a8SIlpo Järvinen return; 2522726e07a8SIlpo Järvinen 252399a1dec7SMel Gorman if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 25247450aaf6SEric Dumazet sk_gfp_mask(sk, GFP_ATOMIC))) 25259e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 2526a762a980SDavid S. Miller } 2527a762a980SDavid S. Miller 2528c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 2529c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 2530c1b4a7e6SDavid S. Miller */ 2531c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 2532c1b4a7e6SDavid S. Miller { 2533fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 2534c1b4a7e6SDavid S. Miller 2535c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 2536c1b4a7e6SDavid S. Miller 2537d5dd9175SIlpo Järvinen tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2538c1b4a7e6SDavid S. Miller } 2539c1b4a7e6SDavid S. Miller 25401da177e4SLinus Torvalds /* This function returns the amount that we can raise the 25411da177e4SLinus Torvalds * usable window based on the following constraints 25421da177e4SLinus Torvalds * 25431da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 25441da177e4SLinus Torvalds * 2. We limit memory per socket 25451da177e4SLinus Torvalds * 25461da177e4SLinus Torvalds * RFC 1122: 25471da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 25481da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 25491da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 25501da177e4SLinus Torvalds * 25511da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 25521da177e4SLinus Torvalds * it at least MSS bytes. 25531da177e4SLinus Torvalds * 25541da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 25551da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 25561da177e4SLinus Torvalds * 25571da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 25581da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 25591da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 25601da177e4SLinus Torvalds * window to always advance by a single byte. 
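 * (E.g. a receiver draining one byte per read would creep the right
 * edge 1000, 1001, 1002, ... and invite a stream of one-byte
 * segments from a naive sender.)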
25611da177e4SLinus Torvalds * 25621da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 25631da177e4SLinus Torvalds * then this will not be a problem. 25641da177e4SLinus Torvalds * 25651da177e4SLinus Torvalds * BSD seems to make the following compromise: 25661da177e4SLinus Torvalds * 25671da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 25681da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 25691da177e4SLinus Torvalds * then set the window to 0. 25701da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 25711da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 25721da177e4SLinus Torvalds * and from being larger than the largest representable value. 25731da177e4SLinus Torvalds * 25741da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 25751da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 25761da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 25771da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 25781da177e4SLinus Torvalds * because the pipeline is full. 25791da177e4SLinus Torvalds * 25801da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 25811da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 25821da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 25831da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 25841da177e4SLinus Torvalds * of having a fixed window size at almost all times. 25851da177e4SLinus Torvalds * 25861da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 25871da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 25881da177e4SLinus Torvalds * 25891da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 25901da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 25911da177e4SLinus Torvalds */ 25921da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 25931da177e4SLinus Torvalds { 2594463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 25951da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2596caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 25971da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 25981da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 25991da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 26001da177e4SLinus Torvalds * fluctuations. 
--SAW 1998/11/1
26011da177e4SLinus Torvalds */
2602463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss;
26031da177e4SLinus Torvalds int free_space = tcp_space(sk);
260486c1a045SFlorian Westphal int allowed_space = tcp_full_space(sk);
260586c1a045SFlorian Westphal int full_space = min_t(int, tp->window_clamp, allowed_space);
26061da177e4SLinus Torvalds int window;
26071da177e4SLinus Torvalds
260806425c30SEric Dumazet if (unlikely(mss > full_space)) {
26091da177e4SLinus Torvalds mss = full_space;
261006425c30SEric Dumazet if (mss <= 0)
261106425c30SEric Dumazet return 0;
261206425c30SEric Dumazet }
2613b92edbe0SEric Dumazet if (free_space < (full_space >> 1)) {
2614463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0;
26151da177e4SLinus Torvalds
2616b8da51ebSEric Dumazet if (tcp_under_memory_pressure(sk))
2617056834d9SIlpo Järvinen tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2618056834d9SIlpo Järvinen 4U * tp->advmss);
26191da177e4SLinus Torvalds
262086c1a045SFlorian Westphal /* free_space might become our new window, make sure we don't
262186c1a045SFlorian Westphal * increase it due to wscale.
262286c1a045SFlorian Westphal */
262386c1a045SFlorian Westphal free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
262486c1a045SFlorian Westphal
262586c1a045SFlorian Westphal /* if free space is less than mss estimate, or is below 1/16th
262686c1a045SFlorian Westphal * of the maximum allowed, try to move to zero-window, else
262786c1a045SFlorian Westphal * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
262886c1a045SFlorian Westphal * new incoming data is dropped due to memory limits.
262986c1a045SFlorian Westphal * With large window, mss test triggers way too late in order
263086c1a045SFlorian Westphal * to announce zero window in time before rmem limit kicks in.
263186c1a045SFlorian Westphal */
263286c1a045SFlorian Westphal if (free_space < (allowed_space >> 4) || free_space < mss)
26331da177e4SLinus Torvalds return 0;
26341da177e4SLinus Torvalds }
26351da177e4SLinus Torvalds
26361da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh)
26371da177e4SLinus Torvalds free_space = tp->rcv_ssthresh;
26381da177e4SLinus Torvalds
26391da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the
26401da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway.
26411da177e4SLinus Torvalds */
26421da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) {
26431da177e4SLinus Torvalds window = free_space;
26441da177e4SLinus Torvalds
26451da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away.
26461da177e4SLinus Torvalds * Important case: prevent zero window announcement if
26471da177e4SLinus Torvalds * 1<<rcv_wscale > mss.
26481da177e4SLinus Torvalds */
26491935299dSGao Feng window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
26501da177e4SLinus Torvalds } else {
26511935299dSGao Feng window = tp->rcv_wnd;
26521da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss.
26531da177e4SLinus Torvalds * Window clamp already applied above.
26541da177e4SLinus Torvalds * If our current window offering is within 1 mss of the
26551da177e4SLinus Torvalds * free space we just keep it. This prevents the divide
26561da177e4SLinus Torvalds * and multiply from happening most of the time.
26571da177e4SLinus Torvalds * We also don't do any window rounding when the free space
26581da177e4SLinus Torvalds * is too small.
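 * E.g. with mss = 1000 and free_space = 5400 the offer becomes 5000,
 * and it is then left alone while free_space stays between the offer
 * and one mss above it.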
26591da177e4SLinus Torvalds */ 26601da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 26611935299dSGao Feng window = rounddown(free_space, mss); 266284565070SJohn Heffner else if (mss == full_space && 2663b92edbe0SEric Dumazet free_space > window + (full_space >> 1)) 266484565070SJohn Heffner window = free_space; 26651da177e4SLinus Torvalds } 26661da177e4SLinus Torvalds 26671da177e4SLinus Torvalds return window; 26681da177e4SLinus Torvalds } 26691da177e4SLinus Torvalds 2670cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb, 2671082ac2d5SMartin KaFai Lau const struct sk_buff *next_skb) 2672082ac2d5SMartin KaFai Lau { 26730a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(next_skb))) { 26740a2cf20cSSoheil Hassas Yeganeh const struct skb_shared_info *next_shinfo = 26750a2cf20cSSoheil Hassas Yeganeh skb_shinfo(next_skb); 2676082ac2d5SMartin KaFai Lau struct skb_shared_info *shinfo = skb_shinfo(skb); 2677082ac2d5SMartin KaFai Lau 26780a2cf20cSSoheil Hassas Yeganeh shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 2679082ac2d5SMartin KaFai Lau shinfo->tskey = next_shinfo->tskey; 26802de8023eSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack |= 26812de8023eSMartin KaFai Lau TCP_SKB_CB(next_skb)->txstamp_ack; 2682082ac2d5SMartin KaFai Lau } 2683082ac2d5SMartin KaFai Lau } 2684082ac2d5SMartin KaFai Lau 26854a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */ 2686f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 26871da177e4SLinus Torvalds { 26881da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2689fe067e8aSDavid S. Miller struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 2690058dc334SIlpo Järvinen int skb_size, next_skb_size; 26911da177e4SLinus Torvalds 2692058dc334SIlpo Järvinen skb_size = skb->len; 2693058dc334SIlpo Järvinen next_skb_size = next_skb->len; 26941da177e4SLinus Torvalds 2695058dc334SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 26961da177e4SLinus Torvalds 2697f8071cdeSEric Dumazet if (next_skb_size) { 2698f8071cdeSEric Dumazet if (next_skb_size <= skb_availroom(skb)) 2699f8071cdeSEric Dumazet skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), 2700f8071cdeSEric Dumazet next_skb_size); 2701f8071cdeSEric Dumazet else if (!skb_shift(skb, next_skb, next_skb_size)) 2702f8071cdeSEric Dumazet return false; 2703f8071cdeSEric Dumazet } 27046859d494SIlpo Järvinen tcp_highest_sack_combine(sk, next_skb, skb); 2705a6963a6bSIlpo Järvinen 2706fe067e8aSDavid S. Miller tcp_unlink_write_queue(next_skb, sk); 27071da177e4SLinus Torvalds 270852d570aaSJarek Poplawski if (next_skb->ip_summed == CHECKSUM_PARTIAL) 270952d570aaSJarek Poplawski skb->ip_summed = CHECKSUM_PARTIAL; 27101da177e4SLinus Torvalds 271184fa7933SPatrick McHardy if (skb->ip_summed != CHECKSUM_PARTIAL) 27121da177e4SLinus Torvalds skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 27131da177e4SLinus Torvalds 27141da177e4SLinus Torvalds /* Update sequence range on original skb. */ 27151da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 27161da177e4SLinus Torvalds 2717e6c7d085SIlpo Järvinen /* Merge over control information. This moves PSH/FIN etc. 
over */
27184de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
27191da177e4SLinus Torvalds
27201da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so
27211da177e4SLinus Torvalds * packet counting does not break.
27221da177e4SLinus Torvalds */
27234828e7f4SIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2724a643b5d4SMartin KaFai Lau TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2725b7689205SIlpo Järvinen
2726b7689205SIlpo Järvinen /* changed transmit queue under us so clear hints */
2727ef9da47cSIlpo Järvinen tcp_clear_retrans_hints_partial(tp);
2728ef9da47cSIlpo Järvinen if (next_skb == tp->retransmit_skb_hint)
2729ef9da47cSIlpo Järvinen tp->retransmit_skb_hint = skb;
2730b7689205SIlpo Järvinen
2731797108d1SIlpo Järvinen tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2732797108d1SIlpo Järvinen
2733082ac2d5SMartin KaFai Lau tcp_skb_collapse_tstamp(skb, next_skb);
2734082ac2d5SMartin KaFai Lau
27353ab224beSHideo Aoki sk_wmem_free_skb(sk, next_skb);
2736f8071cdeSEric Dumazet return true;
27371da177e4SLinus Torvalds }
27381da177e4SLinus Torvalds
273967edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2740a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
27414a17fc3aSIlpo Järvinen {
27424a17fc3aSIlpo Järvinen if (tcp_skb_pcount(skb) > 1)
2743a2a385d6SEric Dumazet return false;
27444a17fc3aSIlpo Järvinen if (skb_cloned(skb))
2745a2a385d6SEric Dumazet return false;
27464a17fc3aSIlpo Järvinen if (skb == tcp_send_head(sk))
2747a2a385d6SEric Dumazet return false;
27482331ccc5SEric Dumazet /* Some heuristics for collapsing over SACK'd could be invented */
27494a17fc3aSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2750a2a385d6SEric Dumazet return false;
27514a17fc3aSIlpo Järvinen
2752a2a385d6SEric Dumazet return true;
27534a17fc3aSIlpo Järvinen }
27544a17fc3aSIlpo Järvinen
275567edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
275667edfef7SAndi Kleen * fewer packets on the wire. This is only done on retransmission.
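 * (The normal transmit path already builds mss-sized skbs; tiny
 * segments mostly appear after partial ACKs or an mss change, so the
 * copy cost of collapsing only pays off on the retransmit path.)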
275767edfef7SAndi Kleen */
27584a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
27594a17fc3aSIlpo Järvinen int space)
27604a17fc3aSIlpo Järvinen {
27614a17fc3aSIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk);
27624a17fc3aSIlpo Järvinen struct sk_buff *skb = to, *tmp;
2763a2a385d6SEric Dumazet bool first = true;
27644a17fc3aSIlpo Järvinen
27654a17fc3aSIlpo Järvinen if (!sysctl_tcp_retrans_collapse)
27664a17fc3aSIlpo Järvinen return;
27674de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
27684a17fc3aSIlpo Järvinen return;
27694a17fc3aSIlpo Järvinen
27704a17fc3aSIlpo Järvinen tcp_for_write_queue_from_safe(skb, tmp, sk) {
27714a17fc3aSIlpo Järvinen if (!tcp_can_collapse(sk, skb))
27724a17fc3aSIlpo Järvinen break;
27734a17fc3aSIlpo Järvinen
2774a643b5d4SMartin KaFai Lau if (!tcp_skb_can_collapse_to(to))
2775a643b5d4SMartin KaFai Lau break;
2776a643b5d4SMartin KaFai Lau
27774a17fc3aSIlpo Järvinen space -= skb->len;
27784a17fc3aSIlpo Järvinen
27794a17fc3aSIlpo Järvinen if (first) {
2780a2a385d6SEric Dumazet first = false;
27814a17fc3aSIlpo Järvinen continue;
27824a17fc3aSIlpo Järvinen }
27834a17fc3aSIlpo Järvinen
27844a17fc3aSIlpo Järvinen if (space < 0)
27854a17fc3aSIlpo Järvinen break;
27864a17fc3aSIlpo Järvinen
27874a17fc3aSIlpo Järvinen if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
27884a17fc3aSIlpo Järvinen break;
27894a17fc3aSIlpo Järvinen
2790f8071cdeSEric Dumazet if (!tcp_collapse_retrans(sk, to))
2791f8071cdeSEric Dumazet break;
27924a17fc3aSIlpo Järvinen }
27934a17fc3aSIlpo Järvinen }
27944a17fc3aSIlpo Järvinen
27951da177e4SLinus Torvalds /* This retransmits one SKB. Policy decisions and retransmit queue
27961da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an
27971da177e4SLinus Torvalds * error occurred which prevented the send.
27981da177e4SLinus Torvalds */
279910d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28001da177e4SLinus Torvalds {
28015d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
280210d3be56SEric Dumazet struct tcp_sock *tp = tcp_sk(sk);
28037d227cd2SSridhar Samudrala unsigned int cur_mss;
280410d3be56SEric Dumazet int diff, len, err;
28051da177e4SLinus Torvalds
280610d3be56SEric Dumazet
280710d3be56SEric Dumazet /* Inconclusive MTU probe */
280810d3be56SEric Dumazet if (icsk->icsk_mtup.probe_size)
28095d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0;
28105d424d5aSJohn Heffner
28111da177e4SLinus Torvalds /* Do not send more than we queued. 1/4 is reserved for possible
2812caa20d9aSStephen Hemminger * copying overhead: fragmentation, tunneling, mangling etc.
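 * E.g. with 64 KB queued, roughly 80 KB of true skb memory may be
 * outstanding before we back off with -EAGAIN (assuming sk_sndbuf
 * allows that much).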
28131da177e4SLinus Torvalds */ 281414afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > 2815ffb4d6c8SEric Dumazet min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), 2816ffb4d6c8SEric Dumazet sk->sk_sndbuf)) 28171da177e4SLinus Torvalds return -EAGAIN; 28181da177e4SLinus Torvalds 28191f3279aeSEric Dumazet if (skb_still_in_host_queue(sk, skb)) 28201f3279aeSEric Dumazet return -EBUSY; 28211f3279aeSEric Dumazet 28221da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 28231da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 28241da177e4SLinus Torvalds BUG(); 28251da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 28261da177e4SLinus Torvalds return -ENOMEM; 28271da177e4SLinus Torvalds } 28281da177e4SLinus Torvalds 28297d227cd2SSridhar Samudrala if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 28307d227cd2SSridhar Samudrala return -EHOSTUNREACH; /* Routing failure or similar. */ 28317d227cd2SSridhar Samudrala 28320c54b85fSIlpo Järvinen cur_mss = tcp_current_mss(sk); 28337d227cd2SSridhar Samudrala 28341da177e4SLinus Torvalds /* If receiver has shrunk his window, and skb is out of 28351da177e4SLinus Torvalds * new window, do not retransmit it. The exception is the 28361da177e4SLinus Torvalds * case, when window is shrunk to zero. In this case 28371da177e4SLinus Torvalds * our retransmit serves as a zero window probe. 28381da177e4SLinus Torvalds */ 28399d4fb27dSJoe Perches if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 28409d4fb27dSJoe Perches TCP_SKB_CB(skb)->seq != tp->snd_una) 28411da177e4SLinus Torvalds return -EAGAIN; 28421da177e4SLinus Torvalds 284310d3be56SEric Dumazet len = cur_mss * segs; 284410d3be56SEric Dumazet if (skb->len > len) { 284510d3be56SEric Dumazet if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC)) 28461da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later. */ 284702276f3cSIlpo Järvinen } else { 2848c52e2421SEric Dumazet if (skb_unclone(skb, GFP_ATOMIC)) 2849c52e2421SEric Dumazet return -ENOMEM; 285010d3be56SEric Dumazet 285110d3be56SEric Dumazet diff = tcp_skb_pcount(skb); 285210d3be56SEric Dumazet tcp_set_skb_tso_segs(skb, cur_mss); 285310d3be56SEric Dumazet diff -= tcp_skb_pcount(skb); 285410d3be56SEric Dumazet if (diff) 285510d3be56SEric Dumazet tcp_adjust_pcount(sk, skb, diff); 285610d3be56SEric Dumazet if (skb->len < cur_mss) 285710d3be56SEric Dumazet tcp_retrans_try_collapse(sk, skb, cur_mss); 28581da177e4SLinus Torvalds } 28591da177e4SLinus Torvalds 286049213555SDaniel Borkmann /* RFC3168, section 6.1.1.1. ECN fallback */ 286149213555SDaniel Borkmann if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) 286249213555SDaniel Borkmann tcp_ecn_clear_syn(sk, skb); 286349213555SDaniel Borkmann 2864678550c6SYuchung Cheng /* Update global and local TCP statistics. */ 2865678550c6SYuchung Cheng segs = tcp_skb_pcount(skb); 2866678550c6SYuchung Cheng TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); 2867678550c6SYuchung Cheng if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2868678550c6SYuchung Cheng __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 2869678550c6SYuchung Cheng tp->total_retrans += segs; 2870678550c6SYuchung Cheng 287150bceae9SThomas Graf /* make sure skb->data is aligned on arches that require it 287250bceae9SThomas Graf * and check if ack-trimming & collapsing extended the headroom 287350bceae9SThomas Graf * beyond what csum_start can cover. 
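 * (skb->csum_start is a 16-bit offset from skb->head, hence the
 * 0xFFFF headroom test below.)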
287450bceae9SThomas Graf */
287550bceae9SThomas Graf if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
287650bceae9SThomas Graf skb_headroom(skb) >= 0xFFFF)) {
287710a81980SEric Dumazet struct sk_buff *nskb;
287810a81980SEric Dumazet
2879385e2070SEric Dumazet skb->skb_mstamp = tp->tcp_mstamp;
288010a81980SEric Dumazet nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2881c84a5711SYuchung Cheng err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2882117632e6SEric Dumazet -ENOBUFS;
2883117632e6SEric Dumazet } else {
2884c84a5711SYuchung Cheng err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2885117632e6SEric Dumazet }
2886c84a5711SYuchung Cheng
2887fc9f3501SEric Dumazet if (likely(!err)) {
2888c84a5711SYuchung Cheng TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2889678550c6SYuchung Cheng } else if (err != -EBUSY) {
2890678550c6SYuchung Cheng NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2891fc9f3501SEric Dumazet }
2892c84a5711SYuchung Cheng return err;
289393b174adSYuchung Cheng }
289493b174adSYuchung Cheng
289510d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
289693b174adSYuchung Cheng {
289793b174adSYuchung Cheng struct tcp_sock *tp = tcp_sk(sk);
289810d3be56SEric Dumazet int err = __tcp_retransmit_skb(sk, skb, segs);
28991da177e4SLinus Torvalds
29001da177e4SLinus Torvalds if (err == 0) {
29011da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0
29021da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2903e87cc472SJoe Perches net_dbg_ratelimited("retrans_out leaked\n");
29041da177e4SLinus Torvalds }
29051da177e4SLinus Torvalds #endif
29061da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
29071da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb);
29081da177e4SLinus Torvalds
29091da177e4SLinus Torvalds /* Save stamp of the first retransmit. */
29101da177e4SLinus Torvalds if (!tp->retrans_stamp)
29117faee5c0SEric Dumazet tp->retrans_stamp = tcp_skb_timestamp(skb);
29121da177e4SLinus Torvalds
29131da177e4SLinus Torvalds }
29146e08d5e3SYuchung Cheng
29156e08d5e3SYuchung Cheng if (tp->undo_retrans < 0)
29166e08d5e3SYuchung Cheng tp->undo_retrans = 0;
29176e08d5e3SYuchung Cheng tp->undo_retrans += tcp_skb_pcount(skb);
29181da177e4SLinus Torvalds return err;
29191da177e4SLinus Torvalds }
29201da177e4SLinus Torvalds
29211da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially
29221da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue
29231da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either
29241da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached.
29251da177e4SLinus Torvalds * If doing SACK, the first ACK which comes back for a timeout
29261da177e4SLinus Torvalds * based retransmit packet might feed us FACK information again.
29271da177e4SLinus Torvalds * If so, we use it to avoid unnecessary retransmissions.
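 * tp->retransmit_skb_hint records where the previous pass stopped,
 * sparing a full walk of the write queue on every ACK.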
29281da177e4SLinus Torvalds */
29291da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
29301da177e4SLinus Torvalds {
29316687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
29321da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
29331da177e4SLinus Torvalds struct sk_buff *skb;
29340e1c54c2SIlpo Järvinen struct sk_buff *hole = NULL;
2935840a3cbeSYuchung Cheng u32 max_segs;
293661eb55f4SIlpo Järvinen int mib_idx;
29376a438bbeSStephen Hemminger
293845e77d31SIlpo Järvinen if (!tp->packets_out)
293945e77d31SIlpo Järvinen return;
294045e77d31SIlpo Järvinen
2941618d9f25SIlpo Järvinen if (tp->retransmit_skb_hint) {
29426a438bbeSStephen Hemminger skb = tp->retransmit_skb_hint;
2943618d9f25SIlpo Järvinen } else {
2944fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk);
2945618d9f25SIlpo Järvinen }
29461da177e4SLinus Torvalds
2947ed6e7268SNeal Cardwell max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
2948fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) {
2949dca0aaf8SEric Dumazet __u8 sacked;
295010d3be56SEric Dumazet int segs;
29511da177e4SLinus Torvalds
2952fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk))
2953fe067e8aSDavid S. Miller break;
2954218af599SEric Dumazet
2955218af599SEric Dumazet if (tcp_pacing_check(sk))
2956218af599SEric Dumazet break;
2957218af599SEric Dumazet
29586a438bbeSStephen Hemminger /* we could do better than to assign each time */
295951456b29SIan Morris if (!hole)
29606a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb;
29616a438bbeSStephen Hemminger
296210d3be56SEric Dumazet segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
296310d3be56SEric Dumazet if (segs <= 0)
29641da177e4SLinus Torvalds return;
2965dca0aaf8SEric Dumazet sacked = TCP_SKB_CB(skb)->sacked;
2966a3d2e9f8SEric Dumazet /* In case tcp_shift_skb_data() has aggregated large skbs,
2967a3d2e9f8SEric Dumazet * we need to make sure we are not sending too big TSO packets
2968a3d2e9f8SEric Dumazet */
2969a3d2e9f8SEric Dumazet segs = min_t(int, segs, max_segs);
29700e1c54c2SIlpo Järvinen
2971840a3cbeSYuchung Cheng if (tp->retrans_out >= tp->lost_out) {
2972006f582cSIlpo Järvinen break;
29730e1c54c2SIlpo Järvinen } else if (!(sacked & TCPCB_LOST)) {
297451456b29SIan Morris if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
29750e1c54c2SIlpo Järvinen hole = skb;
297661eb55f4SIlpo Järvinen continue;
29771da177e4SLinus Torvalds
29780e1c54c2SIlpo Järvinen } else {
29790e1c54c2SIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Loss)
29800e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFASTRETRANS;
29810e1c54c2SIlpo Järvinen else
29820e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
29830e1c54c2SIlpo Järvinen }
29840e1c54c2SIlpo Järvinen
29850e1c54c2SIlpo Järvinen if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
298661eb55f4SIlpo Järvinen continue;
298740b215e5SPavel Emelyanov
2988f9616c35SEric Dumazet if (tcp_small_queue_check(sk, skb, 1))
2989f9616c35SEric Dumazet return;
2990f9616c35SEric Dumazet
299110d3be56SEric Dumazet if (tcp_retransmit_skb(sk, skb, segs))
29921da177e4SLinus Torvalds return;
299324ab6becSYuchung Cheng
2994de1d6578SYuchung Cheng NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
29951da177e4SLinus Torvalds
2996684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk))
2997a262f0cdSNandita Dukkipati tp->prr_out += tcp_skb_pcount(skb);
2998a262f0cdSNandita Dukkipati
299957dde7f7SYuchung Cheng if (skb == tcp_write_queue_head(sk) &&
300057dde7f7SYuchung Cheng icsk->icsk_pending !=
ICSK_TIME_REO_TIMEOUT)
3001463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30023f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto,
30033f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX);
30041da177e4SLinus Torvalds }
30051da177e4SLinus Torvalds }
30061da177e4SLinus Torvalds
3007d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3008d83769a5SEric Dumazet * connection tear down and (memory) recovery.
3009845704a5SEric Dumazet * Otherwise tcp_send_fin() could be tempted to either delay FIN
3010845704a5SEric Dumazet * or even be forced to close the flow without any FIN.
3011a6c5ea4cSEric Dumazet * In general, we want to allow one skb per socket to avoid hangs
3012a6c5ea4cSEric Dumazet * with edge-triggered epoll()
3013d83769a5SEric Dumazet */
3014a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3015d83769a5SEric Dumazet {
3016e805605cSJohannes Weiner int amt;
3017d83769a5SEric Dumazet
3018d83769a5SEric Dumazet if (size <= sk->sk_forward_alloc)
3019d83769a5SEric Dumazet return;
3020d83769a5SEric Dumazet amt = sk_mem_pages(size);
3021d83769a5SEric Dumazet sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3022e805605cSJohannes Weiner sk_memory_allocated_add(sk, amt);
3023e805605cSJohannes Weiner
3024baac50bbSJohannes Weiner if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3025baac50bbSJohannes Weiner mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3026d83769a5SEric Dumazet }
3027d83769a5SEric Dumazet
3028845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3029845704a5SEric Dumazet * We should try to send a FIN packet really hard, but eventually give up.
30301da177e4SLinus Torvalds */
30311da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
30321da177e4SLinus Torvalds {
3033845704a5SEric Dumazet struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
30341da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
30351da177e4SLinus Torvalds
3036845704a5SEric Dumazet /* Optimization, tack on the FIN if we have one skb in write queue and
3037845704a5SEric Dumazet * this skb was not yet sent, or we are under memory pressure.
3038845704a5SEric Dumazet * Note: in the latter case, FIN packet will be sent after a timeout,
3039845704a5SEric Dumazet * as TCP stack thinks it has already been transmitted.
30401da177e4SLinus Torvalds */
3041b8da51ebSEric Dumazet if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
3042845704a5SEric Dumazet coalesce:
3043845704a5SEric Dumazet TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3044845704a5SEric Dumazet TCP_SKB_CB(tskb)->end_seq++;
30451da177e4SLinus Torvalds tp->write_seq++;
3046845704a5SEric Dumazet if (!tcp_send_head(sk)) {
3047845704a5SEric Dumazet /* This means tskb was already sent.
3048845704a5SEric Dumazet * Pretend we included the FIN on previous transmit.
3049845704a5SEric Dumazet * We need to set tp->snd_nxt to the value it would have
3050845704a5SEric Dumazet * if FIN had been sent. This is because retransmit path
3051845704a5SEric Dumazet * does not change tp->snd_nxt.
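 * The FIN itself still reaches the peer whenever tskb is
 * retransmitted, since its end_seq was advanced above to cover it.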
3052845704a5SEric Dumazet */ 3053845704a5SEric Dumazet tp->snd_nxt++; 3054845704a5SEric Dumazet return; 3055845704a5SEric Dumazet } 30561da177e4SLinus Torvalds } else { 3057845704a5SEric Dumazet skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); 3058845704a5SEric Dumazet if (unlikely(!skb)) { 3059845704a5SEric Dumazet if (tskb) 3060845704a5SEric Dumazet goto coalesce; 3061845704a5SEric Dumazet return; 30621da177e4SLinus Torvalds } 3063d83769a5SEric Dumazet skb_reserve(skb, MAX_TCP_HEADER); 3064a6c5ea4cSEric Dumazet sk_forced_mem_schedule(sk, skb->truesize); 30651da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 3066e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tp->write_seq, 3067a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_FIN); 30681da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 30691da177e4SLinus Torvalds } 3070845704a5SEric Dumazet __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); 30711da177e4SLinus Torvalds } 30721da177e4SLinus Torvalds 30731da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 30741da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 30751da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 307665bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 30771da177e4SLinus Torvalds */ 3078dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 30791da177e4SLinus Torvalds { 30801da177e4SLinus Torvalds struct sk_buff *skb; 30811da177e4SLinus Torvalds 30827cc2b043SGao Feng TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 30837cc2b043SGao Feng 30841da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 30851da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 30861da177e4SLinus Torvalds if (!skb) { 30874e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 30881da177e4SLinus Torvalds return; 30891da177e4SLinus Torvalds } 30901da177e4SLinus Torvalds 30911da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 30921da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 3093e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 3094a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_RST); 30959a568de4SEric Dumazet tcp_mstamp_refresh(tcp_sk(sk)); 30961da177e4SLinus Torvalds /* Send it off. */ 3097dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 30984e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 30991da177e4SLinus Torvalds } 31001da177e4SLinus Torvalds 310167edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment. 310267edfef7SAndi Kleen * WARNING: This routine must only be called when we have already sent 31031da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 31041da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 31051da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 31061da177e4SLinus Torvalds */ 31071da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 31081da177e4SLinus Torvalds { 31091da177e4SLinus Torvalds struct sk_buff *skb; 31101da177e4SLinus Torvalds 3111fe067e8aSDavid S. 
Miller skb = tcp_write_queue_head(sk); 311251456b29SIan Morris if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 311391df42beSJoe Perches pr_debug("%s: wrong queue state\n", __func__); 31141da177e4SLinus Torvalds return -EFAULT; 31151da177e4SLinus Torvalds } 31164de075e0SEric Dumazet if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 31171da177e4SLinus Torvalds if (skb_cloned(skb)) { 31181da177e4SLinus Torvalds struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 311951456b29SIan Morris if (!nskb) 31201da177e4SLinus Torvalds return -ENOMEM; 3121fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 3122f4a775d1SEric Dumazet __skb_header_release(nskb); 3123fe067e8aSDavid S. Miller __tcp_add_write_queue_head(sk, nskb); 31243ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 31253ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 31263ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 31271da177e4SLinus Torvalds skb = nskb; 31281da177e4SLinus Torvalds } 31291da177e4SLinus Torvalds 31304de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 3131735d3831SFlorian Westphal tcp_ecn_send_synack(sk, skb); 31321da177e4SLinus Torvalds } 3133dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 31341da177e4SLinus Torvalds } 31351da177e4SLinus Torvalds 31364aea39c1SEric Dumazet /** 31374aea39c1SEric Dumazet * tcp_make_synack - Prepare a SYN-ACK. 31384aea39c1SEric Dumazet * sk: listener socket 31394aea39c1SEric Dumazet * dst: dst entry attached to the SYNACK 31404aea39c1SEric Dumazet * req: request_sock pointer 31414aea39c1SEric Dumazet * 31424aea39c1SEric Dumazet * Allocate one skb and build a SYNACK packet. 31434aea39c1SEric Dumazet * @dst is consumed : Caller should not use it again. 31444aea39c1SEric Dumazet */ 31455d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 3146e6b4d113SWilliam Allen Simpson struct request_sock *req, 3147ca6fb065SEric Dumazet struct tcp_fastopen_cookie *foc, 3148b3d05147SEric Dumazet enum tcp_synack_type synack_type) 31491da177e4SLinus Torvalds { 31502e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 31515d062de7SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 315280f03e27SEric Dumazet struct tcp_md5sig_key *md5 = NULL; 31535d062de7SEric Dumazet struct tcp_out_options opts; 31545d062de7SEric Dumazet struct sk_buff *skb; 3155bd0388aeSWilliam Allen Simpson int tcp_header_size; 31565d062de7SEric Dumazet struct tcphdr *th; 3157f5fff5dcSTom Quetchenbach int mss; 31581da177e4SLinus Torvalds 3159ca6fb065SEric Dumazet skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 31604aea39c1SEric Dumazet if (unlikely(!skb)) { 31614aea39c1SEric Dumazet dst_release(dst); 31621da177e4SLinus Torvalds return NULL; 31634aea39c1SEric Dumazet } 31641da177e4SLinus Torvalds /* Reserve space for headers. */ 31651da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 31661da177e4SLinus Torvalds 3167b3d05147SEric Dumazet switch (synack_type) { 3168b3d05147SEric Dumazet case TCP_SYNACK_NORMAL: 31699e17f8a4SEric Dumazet skb_set_owner_w(skb, req_to_sk(req)); 3170b3d05147SEric Dumazet break; 3171b3d05147SEric Dumazet case TCP_SYNACK_COOKIE: 3172b3d05147SEric Dumazet /* Under synflood, we do not attach skb to a socket, 3173b3d05147SEric Dumazet * to avoid false sharing. 
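 * (A listener under synflood can have a huge number of SYNACKs in
 * flight; pointing each skb at the listener would have every CPU
 * dirtying the same socket accounting fields.)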
3174b3d05147SEric Dumazet */
3175b3d05147SEric Dumazet break;
3176b3d05147SEric Dumazet case TCP_SYNACK_FASTOPEN:
3177ca6fb065SEric Dumazet /* sk is a const pointer, because we want to express that multiple
3178ca6fb065SEric Dumazet * cpus might call us concurrently.
3179ca6fb065SEric Dumazet * sk->sk_wmem_alloc is an atomic, so we can promote sk to rw.
3180ca6fb065SEric Dumazet */
3181ca6fb065SEric Dumazet skb_set_owner_w(skb, (struct sock *)sk);
3182b3d05147SEric Dumazet break;
3183ca6fb065SEric Dumazet }
31844aea39c1SEric Dumazet skb_dst_set(skb, dst);
31851da177e4SLinus Torvalds
31863541f9e8SEric Dumazet mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3187f5fff5dcSTom Quetchenbach
318833ad798cSAdam Langley memset(&opts, 0, sizeof(opts));
31898b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
31908b5f12d0SFlorian Westphal if (unlikely(req->cookie_ts))
31919a568de4SEric Dumazet skb->skb_mstamp = cookie_init_timestamp(req);
31928b5f12d0SFlorian Westphal else
31938b5f12d0SFlorian Westphal #endif
31949a568de4SEric Dumazet skb->skb_mstamp = tcp_clock_us();
319580f03e27SEric Dumazet
319680f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
319780f03e27SEric Dumazet rcu_read_lock();
3198fd3a154aSEric Dumazet md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
319980f03e27SEric Dumazet #endif
320058d607d3SEric Dumazet skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
320137bfbddaSEric Dumazet tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
320237bfbddaSEric Dumazet sizeof(*th);
320333ad798cSAdam Langley
3204aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size);
3205aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb);
32061da177e4SLinus Torvalds
3207ea1627c2SEric Dumazet th = (struct tcphdr *)skb->data;
32081da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr));
32091da177e4SLinus Torvalds th->syn = 1;
32101da177e4SLinus Torvalds th->ack = 1;
32116ac705b1SEric Dumazet tcp_ecn_make_synack(req, th);
3212b44084c2SEric Dumazet th->source = htons(ireq->ir_num);
3213634fb979SEric Dumazet th->dest = ireq->ir_rmt_port;
3214e870a8efSIlpo Järvinen /* Setting of flags is superfluous here for callers (and ECE is
3215e870a8efSIlpo Järvinen * not even correctly set)
3216e870a8efSIlpo Järvinen */
3217e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
3218a3433f35SChangli Gao TCPHDR_SYN | TCPHDR_ACK);
32194957faadSWilliam Allen Simpson
32201da177e4SLinus Torvalds th->seq = htonl(TCP_SKB_CB(skb)->seq);
32218336886fSJerry Chu /* XXX data is queued and acked as is. No buffer/window check */
32228336886fSJerry Chu th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
32231da177e4SLinus Torvalds
32241da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled.
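 * The raw 16-bit field is therefore clamped to 65535 below rather
 * than shifted by rcv_wscale.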
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;
	u32 rcv_wnd;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr);
	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk))
		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/* If the user gave us a TCP_MAXSEG, record it to clamp */
	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	tp->max_window = 0;
	tcp_mtup_init(sk);
	tcp_sync_mss(sk, dst_mtu(dst));

	tcp_ca_dst_init(sk, dst);

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(sk);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
		tp->window_clamp = tcp_full_space(sk);

	rcv_wnd = tcp_rwnd_init_bpf(sk);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);

	tcp_select_initial_window(tcp_full_space(sk),
				  tp->advmss - (tp->rx_opt.ts_recent_stamp ?
				  tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  &tp->rcv_wnd,
				  &tp->window_clamp,
				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
				  &rcv_wscale,
				  rcv_wnd);

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, 0);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->snd_up = tp->write_seq;
	tp->snd_nxt = tp->write_seq;

	if (likely(!tp->repair))
		tp->rcv_nxt = 0;
	else
		tp->rcv_tstamp = tcp_jiffies32;
	tp->rcv_wup = tp->rcv_nxt;
	tp->copied_seq = tp->rcv_nxt;

	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
	inet_csk(sk)->icsk_retransmits = 0;
	tcp_clear_retrans(tp);
}
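#if 0
/* Hedged sketch of the producer side of tcp_rwnd_init_bpf() above,
 * modeled on samples/bpf/tcp_rwnd_kern.c: a sock_ops BPF program
 * (built separately and attached to a cgroup) answering the
 * BPF_SOCK_OPS_RWND_INIT query.  The value 40 (in MSS units) and the
 * attachment details are illustrative assumptions.
 */
SEC("sockops")
int bpf_rwnd(struct bpf_sock_ops *skops)
{
	int rv = -1;

	if (skops->op == BPF_SOCK_OPS_RWND_INIT)
		rv = 40;	/* initial receive window, in MSS */

	skops->reply = rv;
	return 1;
}
#endif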
static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->end_seq += skb->len;
	__skb_header_release(skb);
	__tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	tp->write_seq = tcb->end_seq;
	tp->packets_out += tcp_skb_pcount(skb);
}
/* Build and send a SYN with data and (cached) Fast Open cookie. However,
 * queue a data-only packet after the regular SYN, such that regular SYNs
 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
 * only the SYN sequence, the data are retransmitted in the first ACK.
 * If the cookie is not cached or another error occurs, fall back to
 * sending a regular SYN with a Fast Open cookie request option.
 */
static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_fastopen_request *fo = tp->fastopen_req;
	int space, err = 0;
	struct sk_buff *syn_data;

	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
		goto fallback;

	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
	 * user-MSS. Reserve maximum option space for middleboxes that add
	 * private TCP options. The cost is reduced data space in SYN :(
	 */
	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);

	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
		MAX_TCP_OPTION_SPACE;

	space = min_t(size_t, space, fo->size);

	/* limit to order-0 allocations */
	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));

	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
	if (!syn_data)
		goto fallback;
	syn_data->ip_summed = CHECKSUM_PARTIAL;
	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
	if (space) {
		int copied = copy_from_iter(skb_put(syn_data, space), space,
					    &fo->data->msg_iter);
		if (unlikely(!copied)) {
			kfree_skb(syn_data);
			goto fallback;
		}
		if (copied != space) {
			skb_trim(syn_data, copied);
			space = copied;
		}
	}
	/* No more data pending in inet_wait_for_connect() */
	if (space == fo->size)
		fo->data = NULL;
	fo->copied = space;

	tcp_connect_queue_skb(sk, syn_data);
	if (syn_data->len)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);

	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);

	syn->skb_mstamp = syn_data->skb_mstamp;

	/* Now full SYN+DATA was cloned and sent (or not),
	 * remove the SYN from the original skb (syn_data)
	 * we keep in write queue in case of a retransmit, as we
	 * also have the SYN packet (with no data) in the same queue.
	 */
	TCP_SKB_CB(syn_data)->seq++;
	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
	if (!err) {
		tp->syn_data = (fo->copied > 0);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
		goto done;
	}

fallback:
	/* Send a regular SYN with Fast Open cookie request option */
	if (fo->cookie.len > 0)
		fo->cookie.len = 0;
	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
	if (err)
		tp->syn_fastopen = 0;
done:
	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
	return err;
}
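#if 0
/* Hedged userspace sketch: tcp_send_syn_data() runs when a client uses
 * Fast Open, e.g. sendto() with MSG_FASTOPEN on a not-yet-connected
 * socket.  dst/buf/len are placeholders.  Needs <netinet/in.h>,
 * <netinet/tcp.h>, <sys/socket.h>.
 */
static ssize_t example_fastopen_send(const struct sockaddr_in *dst,
				     const void *buf, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Data rides on the SYN when a cookie is cached; otherwise the
	 * kernel sends a plain SYN carrying a cookie request, exactly
	 * the fallback: path above.
	 */
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)dst, sizeof(*dst));
}
#endif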
/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int err;

	tcp_connect_init(sk);

	if (unlikely(tp->repair)) {
		tcp_finish_connect(sk, NULL);
		return 0;
	}

	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
	if (unlikely(!buff))
		return -ENOBUFS;

	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
	tcp_mstamp_refresh(tp);
	tp->retrans_stamp = tcp_time_stamp(tp);
	tcp_connect_queue_skb(sk, buff);
	tcp_ecn_send_syn(sk, buff);

	/* Send off SYN; include data in Fast Open. */
	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
	if (err == -ECONNREFUSED)
		return err;

	/* We change tp->snd_nxt after the tcp_transmit_skb() call
	 * in order to make this packet get counted in tcpOutSegs.
	 */
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;
	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}
EXPORT_SYMBOL(tcp_connect);
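#if 0
/* Hedged userspace sketch: the tp->repair early return above serves
 * checkpoint/restore tools.  With CAP_NET_ADMIN, roughly the following
 * re-establishes a socket without ever emitting a SYN; the sequence
 * number restore steps are elided.  Needs <netinet/in.h>,
 * <netinet/tcp.h>, <sys/socket.h>.
 */
static int example_repair_connect(int fd, const struct sockaddr_in *dst)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)))
		return -1;
	/* ... restore queues/sequences via TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ ... */
	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}
#endif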
/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (icsk->icsk_ack.pingpong ||
		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound delayed ack.
		 * Do not use inet_csk(sk)->icsk_rto here; use results of rtt
		 * measurements directly.
		 */
		if (tp->srtt_us) {
			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
					TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use the new timeout only if there wasn't an older one already. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
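/* Worked example for the clamp above (illustrative numbers, assuming
 * HZ == 1000): with ato == TCP_DELACK_MAX == 200ms because pingpong mode
 * is set, and a smoothed RTT of 3ms (tp->srtt_us >> 3 == 3000us), the
 * rtt bound becomes max(3ms, TCP_DELACK_MIN == 40ms) == 40ms, which is
 * below max_ato, so ato == min(200ms, 40ms) == 40ms and the ACK timer is
 * armed roughly 40ms out, unless an already-pending timer fires sooner.
 */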
/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	struct sk_buff *buff;

	/* If we have been reset, we may not send again. */
	if (sk->sk_state == TCP_CLOSE)
		return;

	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);

	/* We are not putting this on the write queue, so
	 * tcp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	buff = alloc_skb(MAX_TCP_HEADER,
			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
	if (unlikely(!buff)) {
		inet_csk_schedule_ack(sk);
		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  TCP_DELACK_MAX, TCP_RTO_MAX);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(buff, MAX_TCP_HEADER);
	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
	 * too much.
	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
	 */
	skb_set_tcp_pure_ack(buff);

	/* Send it off, this clears delayed acks for us. */
	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
}
EXPORT_SYMBOL_GPL(tcp_send_ack);
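#if 0
/* For reference (hedged: matches include/net/tcp.h in kernels of this
 * vintage, not defined in this file): the helper simply pins truesize
 * to a token value, so a pure ACK charges ~2 bytes rather than ~784
 * against TSQ and fq/pacing byte budgets.
 */
static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}
#endif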
/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: to send TWO zero-length segments in urgent mode:
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER,
			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
	if (!skb)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	/* Use a previous sequence. This should cause the other
	 * end to send an ack.  Don't queue or clone SKB, just
	 * send it.
	 */
	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
	NET_INC_STATS(sock_net(sk), mib);
	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}

/* Called from setsockopt(... TCP_REPAIR) */
void tcp_send_window_probe(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED) {
		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
	}
}
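#if 0
/* Hedged userspace sketch: tcp_send_window_probe() is reached when a
 * checkpoint/restore tool switches repair mode off again, so the peer
 * promptly re-learns our window.  fd is a placeholder descriptor.
 * Needs <netinet/in.h>, <netinet/tcp.h>, <sys/socket.h>.
 */
static int example_leave_repair_mode(int fd)
{
	int off = 0;

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
}
#endif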
/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk, int mib)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (sk->sk_state == TCP_CLOSE)
		return -1;

	skb = tcp_send_head(sk);
	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		int err;
		unsigned int mss = tcp_current_mss(sk);
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* We are probing the opening of a window
		 * but the window size is != 0;
		 * this must have been the result of sender-side SWS avoidance.
		 */
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
		    skb->len > mss) {
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
				return -1;
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(skb, mss);

		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		if (!err)
			tcp_event_new_data_sent(sk, skb);
		return err;
	} else {
		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1, mib);
		return tcp_xmit_probe_skb(sk, 0, mib);
	}
}
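#if 0
/* Hedged userspace sketch: the keepalive timer is one caller of
 * tcp_write_wakeup() above.  Keepalive probing is enabled per socket
 * roughly like this; the interval values (in seconds) are placeholders.
 * Needs <netinet/in.h>, <netinet/tcp.h>, <sys/socket.h>.
 */
static int example_enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
#endif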
/* A window probe timeout has occurred. If the window is not closed,
 * send a partial packet, else send a zero-window probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	unsigned long probe_max;
	int err;

	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);

	if (tp->packets_out || !tcp_send_head(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		probe_max = TCP_RTO_MAX;
	} else {
		/* If the packet was not sent due to local congestion,
		 * do not back off and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * But keep using the accumulated backoff.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				  tcp_probe0_when(sk, probe_max),
				  TCP_RTO_MAX);
}

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
{
	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
	struct flowi fl;
	int res;

	tcp_rsk(req)->txhash = net_tx_rndhash();
	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
	if (!res) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
		if (unlikely(tcp_passive_fastopen(sk)))
			tcp_sk(sk)->total_retrans++;
	}
	return res;
}
EXPORT_SYMBOL(tcp_rtx_synack);