// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* SND.NXT, if window was not shrunk or the amount shrunk was less than one
 * window scaling factor due to loss of precision.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;
	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
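/* Illustrative example (editorial, not part of the original source): with
 * icsk_rto of 200 ms, an idle delta of 650 ms, snd_cwnd of 40 and a restart
 * window of 10, the loop above halves cwnd twice (40 -> 20 -> 10) and then
 * stops at the restart floor, so transmission resumes with snd_cwnd = 10
 * instead of the stale pre-idle value.
 */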
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* If this is the first data packet sent in response to the
	 * previously received data, and it is sent within the ATO
	 * window after the last received packet, increase the
	 * pingpong count.
	 */
	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);

	tp->lsndtime = now;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack - TCP_FASTRETRANS_THRESH);
		tp->compressed_ack = TCP_FASTRETRANS_THRESH;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
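/* Worked example (editorial): with a receive budget of space = 4 MB = 2^22
 * bytes, ilog2(space) - 15 = 7, so rcv_wscale becomes 7. The peer then
 * left-shifts our 16-bit window field by 7 bits, letting us advertise
 * windows up to 65535 << 7, roughly 8 MB.
 */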
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN. */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		       tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}
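/* Editorial note: per RFC 3168, the active opener advertises ECN by setting
 * both ECE and CWR in its SYN (tcp_ecn_send_syn() above), while the passive
 * side answers with ECE alone in the SYN-ACK (tcp_ecn_make_synack() below);
 * that asymmetric exchange completes the negotiation.
 */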
static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN-ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
/* Urgent mode is in effect while snd_up is ahead of snd_una. */
static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_EXP << 8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
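/* Illustrative wire layout (editorial): for an established flow that
 * negotiated timestamps only, tcp_options_write() below emits two NOPs
 * followed by kind 8 (TIMESTAMP), length 10, padding the option to a
 * 32-bit aligned 12-byte block:
 *
 *	01 01 08 0a <4-byte TSval> <4-byte TSecr>
 */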
/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);
}

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
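/* Worked example (editorial): assuming timestamps, window scaling and SACK
 * are all enabled and no MD5 key is configured, the 40-byte option budget
 * (MAX_TCP_OPTION_SPACE) is spent as MSS 4 + timestamps 12 (SACK_PERM rides
 * inside the timestamp block for free) + window scale 4, leaving 20 bytes
 * for a Fast Open cookie.
 */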
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
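/* Worked example (editorial): with timestamps negotiated and no MD5 key,
 * size starts at 12 bytes, so remaining = 40 - 12 = 28 and at most
 * (28 - 4) / 8 = 3 SACK blocks fit. Adding MD5 (20 bytes) leaves only
 * 8 bytes, i.e. zero SACK blocks, which is why tcp_synack_options()
 * above disables timestamps when both MD5 and SACK are requested.
 */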
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important tcp_wfree() can be replaced by sock_wfree() in the event skb
 * needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}
/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - fewer callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under soft irq.
 * We can call TCP stack right away, unless socket is owned by user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace first 10 MSS.
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
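/* Worked example (editorial): at sk_pacing_rate = 125,000,000 bytes/sec
 * (about 1 Gbit/s), a 1500-byte skb gives len_ns = 1500 * NSEC_PER_SEC /
 * 125000000 = 12000 ns, so tcp_wstamp_ns advances by 12 us per such packet,
 * minus any credit accumulated since the previous send (the subtraction is
 * capped at half of len_ns to absorb OS jitter).
 */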
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct sk_buff *oskb = NULL;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	u64 prior_wstamp;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);
	prior_wstamp = tp->tcp_wstamp_ns;
	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		oskb = skb;

		tcp_skb_tsorted_save(oskb) {
			if (unlikely(skb_cloned(oskb)))
				skb = pskb_copy(oskb, gfp_mask);
			else
				skb = skb_clone(oskb, gfp_mask);
		} tcp_skb_tsorted_restore(oskb);

		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	} else {
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
		 * at receiver : This slightly improves GRO performance.
		 * Note that we do not force the PSH flag for non GSO packets,
		 * because they might be sent under high congestion events,
		 * and in this case it is better to delay the delivery of 1-MSS
		 * packets and thus the corresponding ACK packet that would
		 * release the following packet.
		 */
		if (tcp_skb_pcount(skb) > 1)
			tcb->tcp_flags |= TCPHDR_PSH;
	}
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if packet is looped back :
	 * Other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
	skb->pfmemalloc = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);

	/* Build TCP header and checksum it. */
	th = (struct tcphdr *)skb->data;
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(rcv_nxt);
	/* Write data offset (in 32-bit words) and TCP flags with a
	 * single 16-bit store.
	 */
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);
Miller 11071da177e4SLinus Torvalds th->check = 0; 11081da177e4SLinus Torvalds th->urg_ptr = 0; 11091da177e4SLinus Torvalds 111033f5f57eSIlpo Järvinen /* The urg_mode check is necessary during a below snd_una win probe */ 11117691367dSHerbert Xu if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 11127691367dSHerbert Xu if (before(tp->snd_up, tcb->seq + 0x10000)) { 11131da177e4SLinus Torvalds th->urg_ptr = htons(tp->snd_up - tcb->seq); 11141da177e4SLinus Torvalds th->urg = 1; 11157691367dSHerbert Xu } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 11160eae88f3SEric Dumazet th->urg_ptr = htons(0xFFFF); 11177691367dSHerbert Xu th->urg = 1; 11187691367dSHerbert Xu } 11191da177e4SLinus Torvalds } 11201da177e4SLinus Torvalds 1121bd0388aeSWilliam Allen Simpson tcp_options_write((__be32 *)(th + 1), tp, &opts); 112251466a75SEric Dumazet skb_shinfo(skb)->gso_type = sk->sk_gso_type; 1123ea1627c2SEric Dumazet if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { 1124ea1627c2SEric Dumazet th->window = htons(tcp_select_window(sk)); 1125ea1627c2SEric Dumazet tcp_ecn_send(sk, skb, th, tcp_header_size); 1126ea1627c2SEric Dumazet } else { 1127ea1627c2SEric Dumazet /* RFC1323: The window in SYN & SYN/ACK segments 1128ea1627c2SEric Dumazet * is never scaled. 1129ea1627c2SEric Dumazet */ 1130ea1627c2SEric Dumazet th->window = htons(min(tp->rcv_wnd, 65535U)); 1131ea1627c2SEric Dumazet } 1132cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 1133cfb6eeb4SYOSHIFUJI Hideaki /* Calculate the MD5 hash, as we have all we need now */ 1134cfb6eeb4SYOSHIFUJI Hideaki if (md5) { 1135a465419bSEric Dumazet sk_nocaps_add(sk, NETIF_F_GSO_MASK); 1136bd0388aeSWilliam Allen Simpson tp->af_specific->calc_md5_hash(opts.hash_location, 113739f8e58eSEric Dumazet md5, sk, skb); 1138cfb6eeb4SYOSHIFUJI Hideaki } 1139cfb6eeb4SYOSHIFUJI Hideaki #endif 1140cfb6eeb4SYOSHIFUJI Hideaki 1141bb296246SHerbert Xu icsk->icsk_af_ops->send_check(sk, skb); 11421da177e4SLinus Torvalds 11434de075e0SEric Dumazet if (likely(tcb->tcp_flags & TCPHDR_ACK)) 114427cde44aSYuchung Cheng tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); 11451da177e4SLinus Torvalds 1146a44d6eacSMartin KaFai Lau if (skb->len != tcp_header_size) { 1147cf533ea5SEric Dumazet tcp_event_data_sent(tp, sk); 1148a44d6eacSMartin KaFai Lau tp->data_segs_out += tcp_skb_pcount(skb); 1149ba113c3aSWei Wang tp->bytes_sent += skb->len - tcp_header_size; 1150a44d6eacSMartin KaFai Lau } 11511da177e4SLinus Torvalds 1152bd37a088SWei Yongjun if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 1153aa2ea058STom Herbert TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 1154aa2ea058STom Herbert tcp_skb_pcount(skb)); 11551da177e4SLinus Torvalds 11562efd055cSMarcelo Ricardo Leitner tp->segs_out += tcp_skb_pcount(skb); 1157f69ad292SEric Dumazet /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ 1158cd7d8498SEric Dumazet skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); 1159f69ad292SEric Dumazet skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1160cd7d8498SEric Dumazet 1161d3edd06eSEric Dumazet /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ 1162971f10ecSEric Dumazet 1163971f10ecSEric Dumazet /* Cleanup our debris for IP stacks */ 1164971f10ecSEric Dumazet memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), 1165971f10ecSEric Dumazet sizeof(struct inet6_skb_parm))); 1166971f10ecSEric Dumazet 1167a842fe14SEric Dumazet tcp_add_tx_delay(skb, tp); 1168a842fe14SEric Dumazet 1169b0270e91SEric Dumazet err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); 
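/*
 * Editorial sketch, not part of the kernel source: the error handling
 * below leans on net_xmit_eval() (include/net/sock.h), which treats
 * local qdisc congestion as a soft error.  Roughly, with hypothetical
 * names mirroring NET_XMIT_SUCCESS/DROP/CN:
 */
enum sketch_net_xmit { SKETCH_XMIT_SUCCESS, SKETCH_XMIT_DROP, SKETCH_XMIT_CN };

static inline int sketch_net_xmit_eval(int e)
{
	/* tcp_enter_cwr() has already reacted to any positive code;
	 * a pure congestion notification is then hidden from the caller.
	 */
	return e == SKETCH_XMIT_CN ? 0 : e;
}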
11707faee5c0SEric Dumazet 11718c72c65bSEric Dumazet if (unlikely(err > 0)) { 11725ee2c941SChristoph Paasch tcp_enter_cwr(sk); 11738c72c65bSEric Dumazet err = net_xmit_eval(err); 11748c72c65bSEric Dumazet } 1175fc225799SEric Dumazet if (!err && oskb) { 1176a7a25630SEric Dumazet tcp_update_skb_after_send(sk, oskb, prior_wstamp); 1177fc225799SEric Dumazet tcp_rate_skb_sent(sk, oskb); 1178fc225799SEric Dumazet } 11798c72c65bSEric Dumazet return err; 11801da177e4SLinus Torvalds } 11811da177e4SLinus Torvalds 11822987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 11832987babbSYuchung Cheng gfp_t gfp_mask) 11842987babbSYuchung Cheng { 11852987babbSYuchung Cheng return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, 11862987babbSYuchung Cheng tcp_sk(sk)->rcv_nxt); 11872987babbSYuchung Cheng } 11882987babbSYuchung Cheng 118967edfef7SAndi Kleen /* This routine just queues the buffer for sending. 11901da177e4SLinus Torvalds * 11911da177e4SLinus Torvalds * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 11921da177e4SLinus Torvalds * otherwise socket can stall. 11931da177e4SLinus Torvalds */ 11941da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 11951da177e4SLinus Torvalds { 11961da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 11971da177e4SLinus Torvalds 11981da177e4SLinus Torvalds /* Advance write_seq and place onto the write_queue. */ 1199*0f317464SEric Dumazet WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); 1200f4a775d1SEric Dumazet __skb_header_release(skb); 1201fe067e8aSDavid S. Miller tcp_add_write_queue_tail(sk, skb); 12023ab224beSHideo Aoki sk->sk_wmem_queued += skb->truesize; 12033ab224beSHideo Aoki sk_mem_charge(sk, skb->truesize); 12041da177e4SLinus Torvalds } 12051da177e4SLinus Torvalds 120667edfef7SAndi Kleen /* Initialize TSO segments for a packet. */ 12075bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1208f6302d1dSDavid S. Miller { 12094a64fd6cSEric Dumazet if (skb->len <= mss_now) { 1210f6302d1dSDavid S. Miller /* Avoid the costly divide in the normal 1211f6302d1dSDavid S. Miller * non-TSO case. 1212f6302d1dSDavid S. Miller */ 1213cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 1); 1214f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = 0; 1215f6302d1dSDavid S. 
Miller } else { 1216cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); 1217f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = mss_now; 12181da177e4SLinus Torvalds } 12191da177e4SLinus Torvalds } 12201da177e4SLinus Torvalds 1221797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various 1222797108d1SIlpo Järvinen * tweaks to fix counters 1223797108d1SIlpo Järvinen */ 1224cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 1225797108d1SIlpo Järvinen { 1226797108d1SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1227797108d1SIlpo Järvinen 1228797108d1SIlpo Järvinen tp->packets_out -= decr; 1229797108d1SIlpo Järvinen 1230797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1231797108d1SIlpo Järvinen tp->sacked_out -= decr; 1232797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1233797108d1SIlpo Järvinen tp->retrans_out -= decr; 1234797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 1235797108d1SIlpo Järvinen tp->lost_out -= decr; 1236797108d1SIlpo Järvinen 1237797108d1SIlpo Järvinen /* Reno case is special. Sigh... */ 1238797108d1SIlpo Järvinen if (tcp_is_reno(tp) && decr > 0) 1239797108d1SIlpo Järvinen tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 1240797108d1SIlpo Järvinen 1241797108d1SIlpo Järvinen if (tp->lost_skb_hint && 1242797108d1SIlpo Järvinen before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 1243713bafeaSYuchung Cheng (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 1244797108d1SIlpo Järvinen tp->lost_cnt_hint -= decr; 1245797108d1SIlpo Järvinen 1246797108d1SIlpo Järvinen tcp_verify_left_out(tp); 1247797108d1SIlpo Järvinen } 1248797108d1SIlpo Järvinen 12490a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb) 12500a2cf20cSSoheil Hassas Yeganeh { 12510a2cf20cSSoheil Hassas Yeganeh return TCP_SKB_CB(skb)->txstamp_ack || 12520a2cf20cSSoheil Hassas Yeganeh (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); 12530a2cf20cSSoheil Hassas Yeganeh } 12540a2cf20cSSoheil Hassas Yeganeh 1255490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) 1256490cc7d0SWillem de Bruijn { 1257490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo = skb_shinfo(skb); 1258490cc7d0SWillem de Bruijn 12590a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(skb)) && 1260490cc7d0SWillem de Bruijn !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { 1261490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo2 = skb_shinfo(skb2); 1262490cc7d0SWillem de Bruijn u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; 1263490cc7d0SWillem de Bruijn 1264490cc7d0SWillem de Bruijn shinfo->tx_flags &= ~tsflags; 1265490cc7d0SWillem de Bruijn shinfo2->tx_flags |= tsflags; 1266490cc7d0SWillem de Bruijn swap(shinfo->tskey, shinfo2->tskey); 1267b51e13faSMartin KaFai Lau TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; 1268b51e13faSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack = 0; 1269490cc7d0SWillem de Bruijn } 1270490cc7d0SWillem de Bruijn } 1271490cc7d0SWillem de Bruijn 1272a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) 1273a166140eSMartin KaFai Lau { 1274a166140eSMartin KaFai Lau TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; 1275a166140eSMartin KaFai Lau TCP_SKB_CB(skb)->eor = 0; 1276a166140eSMartin KaFai Lau } 1277a166140eSMartin KaFai Lau 127875c119afSEric Dumazet /* Insert buff 
after skb on the write or rtx queue of sk. */ 127975c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb, 128075c119afSEric Dumazet struct sk_buff *buff, 128175c119afSEric Dumazet struct sock *sk, 128275c119afSEric Dumazet enum tcp_queue tcp_queue) 128375c119afSEric Dumazet { 128475c119afSEric Dumazet if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE) 128575c119afSEric Dumazet __skb_queue_after(&sk->sk_write_queue, skb, buff); 128675c119afSEric Dumazet else 128775c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); 128875c119afSEric Dumazet } 128975c119afSEric Dumazet 12901da177e4SLinus Torvalds /* Function to create two new TCP segments. Shrinks the given segment 12911da177e4SLinus Torvalds * to the specified size and appends a new segment with the rest of the 12921da177e4SLinus Torvalds * packet to the list. This won't be called frequently, I hope. 12931da177e4SLinus Torvalds * Remember, these are still headerless SKBs at this point. 12941da177e4SLinus Torvalds */ 129575c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, 129675c119afSEric Dumazet struct sk_buff *skb, u32 len, 12976cc55e09SOctavian Purdila unsigned int mss_now, gfp_t gfp) 12981da177e4SLinus Torvalds { 12991da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13001da177e4SLinus Torvalds struct sk_buff *buff; 13016475be16SDavid S. Miller int nsize, old_factor; 1302b617158dSEric Dumazet long limit; 1303b60b49eaSHerbert Xu int nlen; 13049ce01461SIlpo Järvinen u8 flags; 13051da177e4SLinus Torvalds 13062fceec13SIlpo Järvinen if (WARN_ON(len > skb->len)) 13072fceec13SIlpo Järvinen return -EINVAL; 13086a438bbeSStephen Hemminger 13091da177e4SLinus Torvalds nsize = skb_headlen(skb) - len; 13101da177e4SLinus Torvalds if (nsize < 0) 13111da177e4SLinus Torvalds nsize = 0; 13121da177e4SLinus Torvalds 1313b617158dSEric Dumazet /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. 1314b617158dSEric Dumazet * We need some allowance to not penalize applications setting small 1315b617158dSEric Dumazet * SO_SNDBUF values. 1316b617158dSEric Dumazet * Also allow first and last skb in retransmit queue to be split. 1317b617158dSEric Dumazet */ 1318b617158dSEric Dumazet limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE); 1319b617158dSEric Dumazet if (unlikely((sk->sk_wmem_queued >> 1) > limit && 1320b617158dSEric Dumazet tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && 1321b617158dSEric Dumazet skb != tcp_rtx_queue_head(sk) && 1322b617158dSEric Dumazet skb != tcp_rtx_queue_tail(sk))) { 1323f070ef2aSEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); 1324f070ef2aSEric Dumazet return -ENOMEM; 1325f070ef2aSEric Dumazet } 1326f070ef2aSEric Dumazet 13276cc55e09SOctavian Purdila if (skb_unclone(skb, gfp)) 13281da177e4SLinus Torvalds return -ENOMEM; 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds /* Get a new skb... force flag on. */ 1331eb934478SEric Dumazet buff = sk_stream_alloc_skb(sk, nsize, gfp, true); 133251456b29SIan Morris if (!buff) 13331da177e4SLinus Torvalds return -ENOMEM; /* We'll just try again later. */ 133441477662SJakub Kicinski skb_copy_decrypted(buff, skb); 1335ef5cb973SHerbert Xu 13363ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 13373ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1338b60b49eaSHerbert Xu nlen = skb->len - len - nsize; 1339b60b49eaSHerbert Xu buff->truesize += nlen; 1340b60b49eaSHerbert Xu skb->truesize -= nlen; 13411da177e4SLinus Torvalds 13421da177e4SLinus Torvalds /* Correct the sequence numbers. 
*/ 13431da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 13441da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 13451da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 13461da177e4SLinus Torvalds 13471da177e4SLinus Torvalds /* PSH and FIN should only be set in the second packet. */ 13484de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 13494de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 13504de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1351e14c3cafSHerbert Xu TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1352a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 13531da177e4SLinus Torvalds 13541da177e4SLinus Torvalds skb_split(skb, buff, len); 13551da177e4SLinus Torvalds 135698be9b12SEric Dumazet buff->ip_summed = CHECKSUM_PARTIAL; 13571da177e4SLinus Torvalds 1358a61bbcf2SPatrick McHardy buff->tstamp = skb->tstamp; 1359490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 13601da177e4SLinus Torvalds 13616475be16SDavid S. Miller old_factor = tcp_skb_pcount(skb); 13626475be16SDavid S. Miller 13631da177e4SLinus Torvalds /* Fix up tso_factor for both original and new SKB. */ 13645bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 13655bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 13661da177e4SLinus Torvalds 1367b9f64820SYuchung Cheng /* Update delivered info for the new segment */ 1368b9f64820SYuchung Cheng TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; 1369b9f64820SYuchung Cheng 13706475be16SDavid S. Miller /* If this packet has been sent out already, we must 13716475be16SDavid S. Miller * adjust the various packet counters. 13726475be16SDavid S. Miller */ 1373cf0b450cSHerbert Xu if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 13746475be16SDavid S. Miller int diff = old_factor - tcp_skb_pcount(skb) - 13756475be16SDavid S. Miller tcp_skb_pcount(buff); 13761da177e4SLinus Torvalds 1377797108d1SIlpo Järvinen if (diff) 1378797108d1SIlpo Järvinen tcp_adjust_pcount(sk, skb, diff); 13791da177e4SLinus Torvalds } 13801da177e4SLinus Torvalds 13811da177e4SLinus Torvalds /* Link BUFF into the send queue. */ 1382f4a775d1SEric Dumazet __skb_header_release(buff); 138375c119afSEric Dumazet tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); 1384f67971e6SEric Dumazet if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE) 1385e2080072SEric Dumazet list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); 13861da177e4SLinus Torvalds 13871da177e4SLinus Torvalds return 0; 13881da177e4SLinus Torvalds } 13891da177e4SLinus Torvalds 1390f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled 1391f4d01666SEric Dumazet * data is not copied, but immediately discarded. 
13921da177e4SLinus Torvalds */ 13937162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len) 13941da177e4SLinus Torvalds { 13957b7fc97aSEric Dumazet struct skb_shared_info *shinfo; 13961da177e4SLinus Torvalds int i, k, eat; 13971da177e4SLinus Torvalds 13984fa48bf3SEric Dumazet eat = min_t(int, len, skb_headlen(skb)); 13994fa48bf3SEric Dumazet if (eat) { 14004fa48bf3SEric Dumazet __skb_pull(skb, eat); 14014fa48bf3SEric Dumazet len -= eat; 14024fa48bf3SEric Dumazet if (!len) 14037162fb24SEric Dumazet return 0; 14044fa48bf3SEric Dumazet } 14051da177e4SLinus Torvalds eat = len; 14061da177e4SLinus Torvalds k = 0; 14077b7fc97aSEric Dumazet shinfo = skb_shinfo(skb); 14087b7fc97aSEric Dumazet for (i = 0; i < shinfo->nr_frags; i++) { 14097b7fc97aSEric Dumazet int size = skb_frag_size(&shinfo->frags[i]); 14109e903e08SEric Dumazet 14119e903e08SEric Dumazet if (size <= eat) { 1412aff65da0SIan Campbell skb_frag_unref(skb, i); 14139e903e08SEric Dumazet eat -= size; 14141da177e4SLinus Torvalds } else { 14157b7fc97aSEric Dumazet shinfo->frags[k] = shinfo->frags[i]; 14161da177e4SLinus Torvalds if (eat) { 1417b54c9d5bSJonathan Lemon skb_frag_off_add(&shinfo->frags[k], eat); 14187b7fc97aSEric Dumazet skb_frag_size_sub(&shinfo->frags[k], eat); 14191da177e4SLinus Torvalds eat = 0; 14201da177e4SLinus Torvalds } 14211da177e4SLinus Torvalds k++; 14221da177e4SLinus Torvalds } 14231da177e4SLinus Torvalds } 14247b7fc97aSEric Dumazet shinfo->nr_frags = k; 14251da177e4SLinus Torvalds 14261da177e4SLinus Torvalds skb->data_len -= len; 14271da177e4SLinus Torvalds skb->len = skb->data_len; 14287162fb24SEric Dumazet return len; 14291da177e4SLinus Torvalds } 14301da177e4SLinus Torvalds 143167edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */ 14321da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 14331da177e4SLinus Torvalds { 14347162fb24SEric Dumazet u32 delta_truesize; 14357162fb24SEric Dumazet 143614bbd6a5SPravin B Shelar if (skb_unclone(skb, GFP_ATOMIC)) 14371da177e4SLinus Torvalds return -ENOMEM; 14381da177e4SLinus Torvalds 14397162fb24SEric Dumazet delta_truesize = __pskb_trim_head(skb, len); 14401da177e4SLinus Torvalds 14411da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq += len; 144284fa7933SPatrick McHardy skb->ip_summed = CHECKSUM_PARTIAL; 14431da177e4SLinus Torvalds 14447162fb24SEric Dumazet if (delta_truesize) { 14457162fb24SEric Dumazet skb->truesize -= delta_truesize; 14467162fb24SEric Dumazet sk->sk_wmem_queued -= delta_truesize; 14477162fb24SEric Dumazet sk_mem_uncharge(sk, delta_truesize); 14481da177e4SLinus Torvalds sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 14497162fb24SEric Dumazet } 14501da177e4SLinus Torvalds 14515b35e1e6SNeal Cardwell /* Any change of skb->len requires recalculation of tso factor. */ 14521da177e4SLinus Torvalds if (tcp_skb_pcount(skb) > 1) 14535bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds return 0; 14561da177e4SLinus Torvalds } 14571da177e4SLinus Torvalds 14581b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options. 
*/ 14591b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 14605d424d5aSJohn Heffner { 1461cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1462cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 14635d424d5aSJohn Heffner int mss_now; 14645d424d5aSJohn Heffner 14655d424d5aSJohn Heffner /* Calculate base mss without TCP options: 14665d424d5aSJohn Heffner It is MMS_S - sizeof(tcphdr) of rfc1122 14675d424d5aSJohn Heffner */ 14685d424d5aSJohn Heffner mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 14695d424d5aSJohn Heffner 147067469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ 147167469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) { 147267469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 147367469601SEric Dumazet 147467469601SEric Dumazet if (dst && dst_allfrag(dst)) 147567469601SEric Dumazet mss_now -= icsk->icsk_af_ops->net_frag_header_len; 147667469601SEric Dumazet } 147767469601SEric Dumazet 14785d424d5aSJohn Heffner /* Clamp it (mss_clamp does not include tcp options) */ 14795d424d5aSJohn Heffner if (mss_now > tp->rx_opt.mss_clamp) 14805d424d5aSJohn Heffner mss_now = tp->rx_opt.mss_clamp; 14815d424d5aSJohn Heffner 14825d424d5aSJohn Heffner /* Now subtract optional transport overhead */ 14835d424d5aSJohn Heffner mss_now -= icsk->icsk_ext_hdr_len; 14845d424d5aSJohn Heffner 14855d424d5aSJohn Heffner /* Then reserve room for full set of TCP options and 8 bytes of data */ 14865f3e2bf0SEric Dumazet mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss); 14875d424d5aSJohn Heffner return mss_now; 14885d424d5aSJohn Heffner } 14895d424d5aSJohn Heffner 14901b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here. 
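 */

/*
 * Editorial sketch, not part of the kernel source: the arithmetic done
 * by __tcp_mtu_to_mss() above and tcp_mtu_to_mss() below, for a plain
 * IPv4 path (20 byte network header, no IP options, no extension
 * headers), ignoring the clamps.  The helper name is hypothetical.
 */
static inline int sketch_ipv4_mtu_to_mss(int pmtu, int tcp_opt_bytes)
{
	/* sketch_ipv4_mtu_to_mss(1500, 12) == 1448: a 1500 byte MTU
	 * minus 20 (IPv4) and 20 (base TCP header) leaves 1460, and
	 * 12 bytes of timestamp options leave 1448 bytes of payload.
	 */
	return pmtu - 20 - 20 - tcp_opt_bytes;
}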
14911b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu)
14921b63edd6SYuchung Cheng {
14931b63edd6SYuchung Cheng /* Subtract TCP options size, not including SACKs */
14941b63edd6SYuchung Cheng return __tcp_mtu_to_mss(sk, pmtu) -
14951b63edd6SYuchung Cheng (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
14961b63edd6SYuchung Cheng }
14971b63edd6SYuchung Cheng 
14985d424d5aSJohn Heffner /* Inverse of above */
149967469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss)
15005d424d5aSJohn Heffner {
1501cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
1502cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk);
15035d424d5aSJohn Heffner int mtu;
15045d424d5aSJohn Heffner 
15055d424d5aSJohn Heffner mtu = mss +
15065d424d5aSJohn Heffner tp->tcp_header_len +
15075d424d5aSJohn Heffner icsk->icsk_ext_hdr_len +
15085d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len;
15095d424d5aSJohn Heffner 
151067469601SEric Dumazet /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
151167469601SEric Dumazet if (icsk->icsk_af_ops->net_frag_header_len) {
151267469601SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk);
151367469601SEric Dumazet 
151467469601SEric Dumazet if (dst && dst_allfrag(dst))
151567469601SEric Dumazet mtu += icsk->icsk_af_ops->net_frag_header_len;
151667469601SEric Dumazet }
15175d424d5aSJohn Heffner return mtu;
15185d424d5aSJohn Heffner }
1519556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu);
15205d424d5aSJohn Heffner 
152167edfef7SAndi Kleen /* MTU probing init per socket */
15225d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk)
15235d424d5aSJohn Heffner {
15245d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk);
15255d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
1526b0f9ca53SFan Du struct net *net = sock_net(sk);
15275d424d5aSJohn Heffner 
1528b0f9ca53SFan Du icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
15295d424d5aSJohn Heffner icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
15305d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len;
1531b0f9ca53SFan Du icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
15325d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0;
153305cbc0dbSFan Du if (icsk->icsk_mtup.enabled)
1534c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
15355d424d5aSJohn Heffner }
15364bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init);
15375d424d5aSJohn Heffner 
15381da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set.
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
15411da177e4SLinus Torvalds for TCP options, but includes only bare TCP header.
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1544caa20d9aSStephen Hemminger It is minimum of user_mss and mss received with SYN.
15451da177e4SLinus Torvalds It also does not include TCP options.
15461da177e4SLinus Torvalds 
1547d83d8461SArnaldo Carvalho de Melo inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds tp->mss_cache is current effective sending mss, including
15501da177e4SLinus Torvalds all tcp options except for SACKs.
It is evaluated, 15511da177e4SLinus Torvalds taking into account current pmtu, but never exceeds 15521da177e4SLinus Torvalds tp->rx_opt.mss_clamp. 15531da177e4SLinus Torvalds 15541da177e4SLinus Torvalds NOTE1. rfc1122 clearly states that advertised MSS 15551da177e4SLinus Torvalds DOES NOT include either tcp or ip options. 15561da177e4SLinus Torvalds 1557d83d8461SArnaldo Carvalho de Melo NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1558d83d8461SArnaldo Carvalho de Melo are READ ONLY outside this function. --ANK (980731) 15591da177e4SLinus Torvalds */ 15601da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 15611da177e4SLinus Torvalds { 15621da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1563d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 15645d424d5aSJohn Heffner int mss_now; 15651da177e4SLinus Torvalds 15665d424d5aSJohn Heffner if (icsk->icsk_mtup.search_high > pmtu) 15675d424d5aSJohn Heffner icsk->icsk_mtup.search_high = pmtu; 15681da177e4SLinus Torvalds 15695d424d5aSJohn Heffner mss_now = tcp_mtu_to_mss(sk, pmtu); 1570409d22b4SIlpo Järvinen mss_now = tcp_bound_to_half_wnd(tp, mss_now); 15711da177e4SLinus Torvalds 15721da177e4SLinus Torvalds /* And store cached results */ 1573d83d8461SArnaldo Carvalho de Melo icsk->icsk_pmtu_cookie = pmtu; 15745d424d5aSJohn Heffner if (icsk->icsk_mtup.enabled) 15755d424d5aSJohn Heffner mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1576c1b4a7e6SDavid S. Miller tp->mss_cache = mss_now; 15771da177e4SLinus Torvalds 15781da177e4SLinus Torvalds return mss_now; 15791da177e4SLinus Torvalds } 15804bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss); 15811da177e4SLinus Torvalds 15821da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options, 15831da177e4SLinus Torvalds * and even PMTU discovery events into account. 15841da177e4SLinus Torvalds */ 15850c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk) 15861da177e4SLinus Torvalds { 1587cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1588cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 1589c1b4a7e6SDavid S. Miller u32 mss_now; 159095c96174SEric Dumazet unsigned int header_len; 159133ad798cSAdam Langley struct tcp_out_options opts; 159233ad798cSAdam Langley struct tcp_md5sig_key *md5; 15931da177e4SLinus Torvalds 1594c1b4a7e6SDavid S. Miller mss_now = tp->mss_cache; 1595c1b4a7e6SDavid S. Miller 15961da177e4SLinus Torvalds if (dst) { 15971da177e4SLinus Torvalds u32 mtu = dst_mtu(dst); 1598d83d8461SArnaldo Carvalho de Melo if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 15991da177e4SLinus Torvalds mss_now = tcp_sync_mss(sk, mtu); 16001da177e4SLinus Torvalds } 16011da177e4SLinus Torvalds 160233ad798cSAdam Langley header_len = tcp_established_options(sk, NULL, &opts, &md5) + 160333ad798cSAdam Langley sizeof(struct tcphdr); 160433ad798cSAdam Langley /* The mss_cache is sized based on tp->tcp_header_len, which assumes 160533ad798cSAdam Langley * some common options. 
If this is an odd packet (because we have SACK
160633ad798cSAdam Langley * blocks etc) then our calculated header_len will be different, and
160733ad798cSAdam Langley * we have to adjust mss_now correspondingly */
160833ad798cSAdam Langley if (header_len != tp->tcp_header_len) {
160933ad798cSAdam Langley int delta = (int) header_len - tp->tcp_header_len;
161033ad798cSAdam Langley mss_now -= delta;
161133ad798cSAdam Langley }
1612cfb6eeb4SYOSHIFUJI Hideaki 
16131da177e4SLinus Torvalds return mss_now;
16141da177e4SLinus Torvalds }
16151da177e4SLinus Torvalds 
161686fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
161786fd14adSWeiping Pan * As additional protections, we do not touch cwnd in retransmission phases,
161886fd14adSWeiping Pan * and if application hit its sndbuf limit recently.
161986fd14adSWeiping Pan */
162086fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk)
1621a762a980SDavid S. Miller {
16229e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk);
1623a762a980SDavid S. Miller 
162486fd14adSWeiping Pan if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
162586fd14adSWeiping Pan sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
162686fd14adSWeiping Pan /* Limited by application or receiver window. */
162786fd14adSWeiping Pan u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
162886fd14adSWeiping Pan u32 win_used = max(tp->snd_cwnd_used, init_win);
162986fd14adSWeiping Pan if (win_used < tp->snd_cwnd) {
163086fd14adSWeiping Pan tp->snd_ssthresh = tcp_current_ssthresh(sk);
163186fd14adSWeiping Pan tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
163286fd14adSWeiping Pan }
163386fd14adSWeiping Pan tp->snd_cwnd_used = 0;
163486fd14adSWeiping Pan }
1635c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32;
163686fd14adSWeiping Pan }
163786fd14adSWeiping Pan 
1638ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1639a762a980SDavid S. Miller {
16401b1fc3fdSWei Wang const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1641a762a980SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk);
1642a762a980SDavid S. Miller 
1643ca8a2263SNeal Cardwell /* Track the maximum number of outstanding packets in each
1644ca8a2263SNeal Cardwell * window, and remember whether we were cwnd-limited then.
1645ca8a2263SNeal Cardwell */
1646ca8a2263SNeal Cardwell if (!before(tp->snd_una, tp->max_packets_seq) ||
1647ca8a2263SNeal Cardwell tp->packets_out > tp->max_packets_out) {
1648ca8a2263SNeal Cardwell tp->max_packets_out = tp->packets_out;
1649ca8a2263SNeal Cardwell tp->max_packets_seq = tp->snd_nxt;
1650ca8a2263SNeal Cardwell tp->is_cwnd_limited = is_cwnd_limited;
1651ca8a2263SNeal Cardwell }
1652e114a710SEric Dumazet 
165324901551SEric Dumazet if (tcp_is_cwnd_limited(sk)) {
1654a762a980SDavid S. Miller /* Network is fed fully. */
1655a762a980SDavid S. Miller tp->snd_cwnd_used = 0;
1656c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32;
1657a762a980SDavid S. Miller } else {
1658a762a980SDavid S. Miller /* Network starves. */
1659a762a980SDavid S. Miller if (tp->packets_out > tp->snd_cwnd_used)
1660a762a980SDavid S. Miller tp->snd_cwnd_used = tp->packets_out;
1661a762a980SDavid S. Miller 
1662b510f0d2SEric Dumazet if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1663c2203cf7SEric Dumazet (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
16641b1fc3fdSWei Wang !ca_ops->cong_control)
1665a762a980SDavid S. Miller tcp_cwnd_application_limited(sk);
1666b0f71bd3SFrancis Yan 
1667b0f71bd3SFrancis Yan /* The following conditions together indicate the starvation
1668b0f71bd3SFrancis Yan * is caused by insufficient sender buffer:
1669b0f71bd3SFrancis Yan * 1) just sent some data (see tcp_write_xmit)
1670b0f71bd3SFrancis Yan * 2) not cwnd limited (this else condition)
167175c119afSEric Dumazet * 3) no more data to send (tcp_write_queue_empty())
1672b0f71bd3SFrancis Yan * 4) application is hitting buffer limit (SOCK_NOSPACE)
1673b0f71bd3SFrancis Yan */
167475c119afSEric Dumazet if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1675b0f71bd3SFrancis Yan test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1676b0f71bd3SFrancis Yan (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1677b0f71bd3SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1678a762a980SDavid S. Miller }
1679a762a980SDavid S. Miller }
1680a762a980SDavid S. Miller 
1681d4589926SEric Dumazet /* Minshall's variant of the Nagle send check. */
1682d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp)
1683d4589926SEric Dumazet {
1684d4589926SEric Dumazet return after(tp->snd_sml, tp->snd_una) &&
1685d4589926SEric Dumazet !after(tp->snd_sml, tp->snd_nxt);
1686d4589926SEric Dumazet }
1687d4589926SEric Dumazet 
1688d4589926SEric Dumazet /* Update snd_sml if this skb is under mss
1689d4589926SEric Dumazet * Note that a TSO packet might end with a sub-mss segment
1690d4589926SEric Dumazet * The test is really :
1691d4589926SEric Dumazet * if ((skb->len % mss) != 0)
1692d4589926SEric Dumazet * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1693d4589926SEric Dumazet * But we can avoid doing the divide again given we already have
1694d4589926SEric Dumazet * skb_pcount = skb->len / mss_now
16950e3a4803SIlpo Järvinen */
1696d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1697d4589926SEric Dumazet const struct sk_buff *skb)
1698d4589926SEric Dumazet {
1699d4589926SEric Dumazet if (skb->len < tcp_skb_pcount(skb) * mss_now)
1700d4589926SEric Dumazet tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1701d4589926SEric Dumazet }
1702d4589926SEric Dumazet 
1703d4589926SEric Dumazet /* Return false, if packet can be sent now without violating Nagle's rules:
1704d4589926SEric Dumazet * 1. It is full sized. (provided by caller in %partial bool)
1705d4589926SEric Dumazet * 2. Or it contains FIN. (already checked by caller)
1706d4589926SEric Dumazet * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1707d4589926SEric Dumazet * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1708d4589926SEric Dumazet * With Minshall's modification: all sent small packets are ACKed.
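/*
 * Editorial sketch, not part of the kernel source: the decision
 * implemented by tcp_nagle_check() below, reduced to plain C.
 * "partial" means the skb ends with a sub-MSS segment; "small_unacked"
 * stands for tcp_minshall_check().  All names are hypothetical.
 */
static inline int sketch_nagle_defers(int partial, int cork, int nodelay,
				      int packets_out, int small_unacked)
{
	if (!partial)
		return 0;	/* full sized segments may always go out */
	if (cork)
		return 1;	/* TCP_CORK: hold back partial segments */
	if (nodelay)
		return 0;	/* TCP_NODELAY: send immediately */
	/* classic Nagle + Minshall: defer while a small packet is unacked */
	return packets_out && small_unacked;
}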
1709d4589926SEric Dumazet */ 1710d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, 1711cc93fc51SPeter Pan(潘卫平) int nonagle) 1712d4589926SEric Dumazet { 1713d4589926SEric Dumazet return partial && 1714d4589926SEric Dumazet ((nonagle & TCP_NAGLE_CORK) || 1715d4589926SEric Dumazet (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1716d4589926SEric Dumazet } 1717605ad7f1SEric Dumazet 1718605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet, 1719605ad7f1SEric Dumazet * to send one TSO packet per ms 1720605ad7f1SEric Dumazet */ 1721dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, 17221b3878caSNeal Cardwell int min_tso_segs) 1723605ad7f1SEric Dumazet { 1724605ad7f1SEric Dumazet u32 bytes, segs; 1725605ad7f1SEric Dumazet 172676a9ebe8SEric Dumazet bytes = min_t(unsigned long, 172776a9ebe8SEric Dumazet sk->sk_pacing_rate >> sk->sk_pacing_shift, 1728605ad7f1SEric Dumazet sk->sk_gso_max_size - 1 - MAX_TCP_HEADER); 1729605ad7f1SEric Dumazet 1730605ad7f1SEric Dumazet /* Goal is to send at least one packet per ms, 1731605ad7f1SEric Dumazet * not one big TSO packet every 100 ms. 1732605ad7f1SEric Dumazet * This preserves ACK clocking and is consistent 1733605ad7f1SEric Dumazet * with tcp_tso_should_defer() heuristic. 1734605ad7f1SEric Dumazet */ 17351b3878caSNeal Cardwell segs = max_t(u32, bytes / mss_now, min_tso_segs); 1736605ad7f1SEric Dumazet 1737350c9f48SEric Dumazet return segs; 1738605ad7f1SEric Dumazet } 1739605ad7f1SEric Dumazet 1740ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting. 1741ed6e7268SNeal Cardwell * See if congestion control module wants to decide; otherwise, autosize. 1742ed6e7268SNeal Cardwell */ 1743ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) 1744ed6e7268SNeal Cardwell { 1745ed6e7268SNeal Cardwell const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 1746dcb8c9b4SEric Dumazet u32 min_tso, tso_segs; 1747ed6e7268SNeal Cardwell 1748dcb8c9b4SEric Dumazet min_tso = ca_ops->min_tso_segs ? 1749dcb8c9b4SEric Dumazet ca_ops->min_tso_segs(sk) : 1750dcb8c9b4SEric Dumazet sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs; 1751dcb8c9b4SEric Dumazet 1752dcb8c9b4SEric Dumazet tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); 1753350c9f48SEric Dumazet return min_t(u32, tso_segs, sk->sk_gso_max_segs); 1754ed6e7268SNeal Cardwell } 1755ed6e7268SNeal Cardwell 1756d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */ 1757d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, 1758d4589926SEric Dumazet const struct sk_buff *skb, 1759d4589926SEric Dumazet unsigned int mss_now, 1760d4589926SEric Dumazet unsigned int max_segs, 1761d4589926SEric Dumazet int nonagle) 1762c1b4a7e6SDavid S. Miller { 1763cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1764d4589926SEric Dumazet u32 partial, needed, window, max_len; 1765c1b4a7e6SDavid S. 
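/*
 * Editorial sketch, not part of the kernel source: the sizing done by
 * tcp_tso_autosize() above, assuming an sk_pacing_rate of 125 MB/s
 * (about 1 Gbit/s), the default sk_pacing_shift of 10, an MSS of 1448
 * and a 64 KB sk_gso_max_size:
 *
 *	bytes = min(125000000 >> 10, 65536 - 1 - MAX_TCP_HEADER) ~= 65 KB
 *	segs  = max(bytes / 1448, min_tso_segs)                  ~= 45
 *
 * tcp_tso_segs() then caps the result at sk_gso_max_segs.
 */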
Miller 176690840defSIlpo Järvinen window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 17671485348dSBen Hutchings max_len = mss_now * max_segs; 17680e3a4803SIlpo Järvinen 17691485348dSBen Hutchings if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 17701485348dSBen Hutchings return max_len; 17710e3a4803SIlpo Järvinen 17725ea3a748SIlpo Järvinen needed = min(skb->len, window); 17735ea3a748SIlpo Järvinen 17741485348dSBen Hutchings if (max_len <= needed) 17751485348dSBen Hutchings return max_len; 17760e3a4803SIlpo Järvinen 1777d4589926SEric Dumazet partial = needed % mss_now; 1778d4589926SEric Dumazet /* If last segment is not a full MSS, check if Nagle rules allow us 1779d4589926SEric Dumazet * to include this last segment in this skb. 1780d4589926SEric Dumazet * Otherwise, we'll split the skb at last MSS boundary 1781d4589926SEric Dumazet */ 1782cc93fc51SPeter Pan(潘卫平) if (tcp_nagle_check(partial != 0, tp, nonagle)) 1783d4589926SEric Dumazet return needed - partial; 1784d4589926SEric Dumazet 1785d4589926SEric Dumazet return needed; 1786c1b4a7e6SDavid S. Miller } 1787c1b4a7e6SDavid S. Miller 1788c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the 1789c1b4a7e6SDavid S. Miller * congestion window rules? If so, return how many segments are allowed. 1790c1b4a7e6SDavid S. Miller */ 1791cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1792cf533ea5SEric Dumazet const struct sk_buff *skb) 1793c1b4a7e6SDavid S. Miller { 1794d649a7a8SEric Dumazet u32 in_flight, cwnd, halfcwnd; 1795c1b4a7e6SDavid S. Miller 1796c1b4a7e6SDavid S. Miller /* Don't be strict about the congestion window for the final FIN. */ 17974de075e0SEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 17984de075e0SEric Dumazet tcp_skb_pcount(skb) == 1) 1799c1b4a7e6SDavid S. Miller return 1; 1800c1b4a7e6SDavid S. Miller 1801c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1802c1b4a7e6SDavid S. Miller cwnd = tp->snd_cwnd; 1803d649a7a8SEric Dumazet if (in_flight >= cwnd) 1804c1b4a7e6SDavid S. Miller return 0; 1805d649a7a8SEric Dumazet 1806d649a7a8SEric Dumazet /* For better scheduling, ensure we have at least 1807d649a7a8SEric Dumazet * 2 GSO packets in flight. 1808d649a7a8SEric Dumazet */ 1809d649a7a8SEric Dumazet halfcwnd = max(cwnd >> 1, 1U); 1810d649a7a8SEric Dumazet return min(halfcwnd, cwnd - in_flight); 1811c1b4a7e6SDavid S. Miller } 1812c1b4a7e6SDavid S. Miller 1813b595076aSUwe Kleine-König /* Initialize TSO state of a skb. 181467edfef7SAndi Kleen * This must be invoked the first time we consider transmitting 1815c1b4a7e6SDavid S. Miller * SKB onto the wire. 1816c1b4a7e6SDavid S. Miller */ 18175bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1818c1b4a7e6SDavid S. Miller { 1819c1b4a7e6SDavid S. Miller int tso_segs = tcp_skb_pcount(skb); 1820c1b4a7e6SDavid S. Miller 1821f8269a49SIlpo Järvinen if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 18225bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 1823c1b4a7e6SDavid S. Miller tso_segs = tcp_skb_pcount(skb); 1824c1b4a7e6SDavid S. Miller } 1825c1b4a7e6SDavid S. Miller return tso_segs; 1826c1b4a7e6SDavid S. Miller } 1827c1b4a7e6SDavid S. Miller 1828c1b4a7e6SDavid S. Miller 1829a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be 1830c1b4a7e6SDavid S. Miller * sent now. 1831c1b4a7e6SDavid S. 
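/*
 * Editorial sketch, not part of the kernel source: the quota computed
 * by tcp_cwnd_test() above, as a hypothetical helper.  With cwnd = 10
 * and 4 packets in flight it returns min(5, 6) == 5 segments.
 */
static inline unsigned int sketch_cwnd_quota(unsigned int cwnd,
					     unsigned int in_flight)
{
	unsigned int halfcwnd;

	if (in_flight >= cwnd)
		return 0;
	/* keep at least 2 GSO packets in flight for better scheduling */
	halfcwnd = cwnd >> 1 ? cwnd >> 1 : 1;
	return halfcwnd < cwnd - in_flight ? halfcwnd : cwnd - in_flight;
}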
Miller */ 1832a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1833c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 1834c1b4a7e6SDavid S. Miller { 1835c1b4a7e6SDavid S. Miller /* Nagle rule does not apply to frames, which sit in the middle of the 1836c1b4a7e6SDavid S. Miller * write_queue (they have no chances to get new data). 1837c1b4a7e6SDavid S. Miller * 1838c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 1839c1b4a7e6SDavid S. Miller * argument based upon the location of SKB in the send queue. 1840c1b4a7e6SDavid S. Miller */ 1841c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 1842a2a385d6SEric Dumazet return true; 1843c1b4a7e6SDavid S. Miller 18449b44190dSYuchung Cheng /* Don't use the nagle rule for urgent data (or for the final FIN). */ 18459b44190dSYuchung Cheng if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 1846a2a385d6SEric Dumazet return true; 1847c1b4a7e6SDavid S. Miller 1848cc93fc51SPeter Pan(潘卫平) if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) 1849a2a385d6SEric Dumazet return true; 1850c1b4a7e6SDavid S. Miller 1851a2a385d6SEric Dumazet return false; 1852c1b4a7e6SDavid S. Miller } 1853c1b4a7e6SDavid S. Miller 1854c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 1855a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 1856a2a385d6SEric Dumazet const struct sk_buff *skb, 1857056834d9SIlpo Järvinen unsigned int cur_mss) 1858c1b4a7e6SDavid S. Miller { 1859c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1860c1b4a7e6SDavid S. Miller 1861c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 1862c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1863c1b4a7e6SDavid S. Miller 186490840defSIlpo Järvinen return !after(end_seq, tcp_wnd_end(tp)); 1865c1b4a7e6SDavid S. Miller } 1866c1b4a7e6SDavid S. Miller 1867c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1868c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 1869c1b4a7e6SDavid S. Miller * tcp_fragment() except that it may make several kinds of assumptions 1870c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 1871c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 1872c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 1873c1b4a7e6SDavid S. Miller */ 187456483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1875c4ead4c5SEric Dumazet unsigned int mss_now, gfp_t gfp) 1876c1b4a7e6SDavid S. Miller { 1877c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 187856483341SEric Dumazet struct sk_buff *buff; 18799ce01461SIlpo Järvinen u8 flags; 1880c1b4a7e6SDavid S. Miller 1881c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 1882c8ac3774SHerbert Xu if (skb->len != skb->data_len) 188356483341SEric Dumazet return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, 188456483341SEric Dumazet skb, len, mss_now, gfp); 1885c1b4a7e6SDavid S. Miller 1886eb934478SEric Dumazet buff = sk_stream_alloc_skb(sk, 0, gfp, true); 188751456b29SIan Morris if (unlikely(!buff)) 1888c1b4a7e6SDavid S. Miller return -ENOMEM; 188941477662SJakub Kicinski skb_copy_decrypted(buff, skb); 1890c1b4a7e6SDavid S. 
Miller 18913ab224beSHideo Aoki sk->sk_wmem_queued += buff->truesize; 18923ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1893b60b49eaSHerbert Xu buff->truesize += nlen; 1894c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 1895c1b4a7e6SDavid S. Miller 1896c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 1897c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1898c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1899c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1900c1b4a7e6SDavid S. Miller 1901c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 19024de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 19034de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 19044de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1905c1b4a7e6SDavid S. Miller 1906c1b4a7e6SDavid S. Miller /* This packet was never sent out yet, so no SACK bits. */ 1907c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->sacked = 0; 1908c1b4a7e6SDavid S. Miller 1909a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 1910a166140eSMartin KaFai Lau 191198be9b12SEric Dumazet buff->ip_summed = CHECKSUM_PARTIAL; 1912c1b4a7e6SDavid S. Miller skb_split(skb, buff, len); 1913490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 1914c1b4a7e6SDavid S. Miller 1915c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 19165bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 19175bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 1918c1b4a7e6SDavid S. Miller 1919c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 1920f4a775d1SEric Dumazet __skb_header_release(buff); 192156483341SEric Dumazet tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); 1922c1b4a7e6SDavid S. Miller 1923c1b4a7e6SDavid S. Miller return 0; 1924c1b4a7e6SDavid S. Miller } 1925c1b4a7e6SDavid S. Miller 1926c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount 1927c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 1928c1b4a7e6SDavid S. Miller * 1929c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 1930c1b4a7e6SDavid S. Miller */ 1931ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, 1932f9bfe4e6SEric Dumazet bool *is_cwnd_limited, 1933f9bfe4e6SEric Dumazet bool *is_rwnd_limited, 1934f9bfe4e6SEric Dumazet u32 max_segs) 1935c1b4a7e6SDavid S. Miller { 19366687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1937f1c6ea38SEric Dumazet u32 send_win, cong_win, limit, in_flight; 193850c8339eSEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 193950c8339eSEric Dumazet struct sk_buff *head; 1940ad9f4f50SEric Dumazet int win_divisor; 1941f1c6ea38SEric Dumazet s64 delta; 1942c1b4a7e6SDavid S. Miller 194399d7662aSEric Dumazet if (icsk->icsk_ca_state >= TCP_CA_Recovery) 1944ae8064acSJohn Heffner goto send_now; 1945ae8064acSJohn Heffner 19465f852eb5SEric Dumazet /* Avoid bursty behavior by allowing defer 1947a682850aSEric Dumazet * only if the last write was recent (1 ms). 1948a682850aSEric Dumazet * Note that tp->tcp_wstamp_ns can be in the future if we have 1949a682850aSEric Dumazet * packets waiting in a qdisc or device for EDT delivery. 
19505f852eb5SEric Dumazet */ 1951a682850aSEric Dumazet delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; 1952a682850aSEric Dumazet if (delta > 0) 1953ae8064acSJohn Heffner goto send_now; 1954908a75c1SDavid S. Miller 1955c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 1956c1b4a7e6SDavid S. Miller 1957c8c9aeb5SStefano Brivio BUG_ON(tcp_skb_pcount(skb) <= 1); 1958c8c9aeb5SStefano Brivio BUG_ON(tp->snd_cwnd <= in_flight); 1959c1b4a7e6SDavid S. Miller 196090840defSIlpo Järvinen send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1961c1b4a7e6SDavid S. Miller 1962c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 1963c1b4a7e6SDavid S. Miller cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1964c1b4a7e6SDavid S. Miller 1965c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 1966c1b4a7e6SDavid S. Miller 1967ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 1968605ad7f1SEric Dumazet if (limit >= max_segs * tp->mss_cache) 1969ae8064acSJohn Heffner goto send_now; 1970ba244fe9SDavid S. Miller 197162ad2761SIlpo Järvinen /* Middle in queue won't get any more data, full sendable already? */ 197262ad2761SIlpo Järvinen if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 197362ad2761SIlpo Järvinen goto send_now; 197462ad2761SIlpo Järvinen 19755bbcc0f5SLinus Torvalds win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); 1976ad9f4f50SEric Dumazet if (win_divisor) { 1977c1b4a7e6SDavid S. Miller u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1978c1b4a7e6SDavid S. Miller 1979c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 1980c1b4a7e6SDavid S. Miller * just use it. 1981c1b4a7e6SDavid S. Miller */ 1982ad9f4f50SEric Dumazet chunk /= win_divisor; 1983c1b4a7e6SDavid S. Miller if (limit >= chunk) 1984ae8064acSJohn Heffner goto send_now; 1985c1b4a7e6SDavid S. Miller } else { 1986c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 1987c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 1988c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 1989c1b4a7e6SDavid S. Miller * then send now. 1990c1b4a7e6SDavid S. Miller */ 19916b5a5c0dSNeal Cardwell if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1992ae8064acSJohn Heffner goto send_now; 1993c1b4a7e6SDavid S. Miller } 1994c1b4a7e6SDavid S. Miller 199575c119afSEric Dumazet /* TODO : use tsorted_sent_queue ? */ 199675c119afSEric Dumazet head = tcp_rtx_queue_head(sk); 199775c119afSEric Dumazet if (!head) 199875c119afSEric Dumazet goto send_now; 1999f1c6ea38SEric Dumazet delta = tp->tcp_clock_cache - head->tstamp; 200050c8339eSEric Dumazet /* If next ACK is likely to come too late (half srtt), do not defer */ 2001f1c6ea38SEric Dumazet if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) 200250c8339eSEric Dumazet goto send_now; 200350c8339eSEric Dumazet 2004f9bfe4e6SEric Dumazet /* Ok, it looks like it is advisable to defer. 2005f9bfe4e6SEric Dumazet * Three cases are tracked : 2006f9bfe4e6SEric Dumazet * 1) We are cwnd-limited 2007f9bfe4e6SEric Dumazet * 2) We are rwnd-limited 2008f9bfe4e6SEric Dumazet * 3) We are application limited. 
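/*
 * Editorial sketch, not part of the kernel source: the classification
 * done right below, as a hypothetical helper.  Whichever window is the
 * smaller one, and already no larger than the skb, is what this
 * deferral is charged to.
 */
static inline void sketch_defer_class(unsigned int cong_win,
				      unsigned int send_win,
				      unsigned int skb_len,
				      bool *is_cwnd_limited,
				      bool *is_rwnd_limited)
{
	if (cong_win < send_win) {
		if (cong_win <= skb_len)
			*is_cwnd_limited = true;
	} else {
		if (send_win <= skb_len)
			*is_rwnd_limited = true;
	}
}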
2009f9bfe4e6SEric Dumazet */ 2010f9bfe4e6SEric Dumazet if (cong_win < send_win) { 2011f9bfe4e6SEric Dumazet if (cong_win <= skb->len) { 2012ca8a2263SNeal Cardwell *is_cwnd_limited = true; 2013f9bfe4e6SEric Dumazet return true; 2014f9bfe4e6SEric Dumazet } 2015f9bfe4e6SEric Dumazet } else { 2016f9bfe4e6SEric Dumazet if (send_win <= skb->len) { 2017f9bfe4e6SEric Dumazet *is_rwnd_limited = true; 2018f9bfe4e6SEric Dumazet return true; 2019f9bfe4e6SEric Dumazet } 2020f9bfe4e6SEric Dumazet } 2021f9bfe4e6SEric Dumazet 2022f9bfe4e6SEric Dumazet /* If this packet won't get more data, do not wait. */ 2023d8ed257fSEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || 2024d8ed257fSEric Dumazet TCP_SKB_CB(skb)->eor) 2025f9bfe4e6SEric Dumazet goto send_now; 2026ca8a2263SNeal Cardwell 2027a2a385d6SEric Dumazet return true; 2028ae8064acSJohn Heffner 2029ae8064acSJohn Heffner send_now: 2030a2a385d6SEric Dumazet return false; 2031c1b4a7e6SDavid S. Miller } 2032c1b4a7e6SDavid S. Miller 203305cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk) 203405cbc0dbSFan Du { 203505cbc0dbSFan Du struct inet_connection_sock *icsk = inet_csk(sk); 203605cbc0dbSFan Du struct tcp_sock *tp = tcp_sk(sk); 203705cbc0dbSFan Du struct net *net = sock_net(sk); 203805cbc0dbSFan Du u32 interval; 203905cbc0dbSFan Du s32 delta; 204005cbc0dbSFan Du 204105cbc0dbSFan Du interval = net->ipv4.sysctl_tcp_probe_interval; 2042c74df29aSEric Dumazet delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; 204305cbc0dbSFan Du if (unlikely(delta >= interval * HZ)) { 204405cbc0dbSFan Du int mss = tcp_current_mss(sk); 204505cbc0dbSFan Du 204605cbc0dbSFan Du /* Update current search range */ 204705cbc0dbSFan Du icsk->icsk_mtup.probe_size = 0; 204805cbc0dbSFan Du icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + 204905cbc0dbSFan Du sizeof(struct tcphdr) + 205005cbc0dbSFan Du icsk->icsk_af_ops->net_header_len; 205105cbc0dbSFan Du icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 205205cbc0dbSFan Du 205305cbc0dbSFan Du /* Update probe time stamp */ 2054c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 205505cbc0dbSFan Du } 205605cbc0dbSFan Du } 205705cbc0dbSFan Du 2058808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) 2059808cf9e3SIlya Lesokhin { 2060808cf9e3SIlya Lesokhin struct sk_buff *skb, *next; 2061808cf9e3SIlya Lesokhin 2062808cf9e3SIlya Lesokhin skb = tcp_send_head(sk); 2063808cf9e3SIlya Lesokhin tcp_for_write_queue_from_safe(skb, next, sk) { 2064808cf9e3SIlya Lesokhin if (len <= skb->len) 2065808cf9e3SIlya Lesokhin break; 2066808cf9e3SIlya Lesokhin 2067888a5c53SWillem de Bruijn if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) 2068808cf9e3SIlya Lesokhin return false; 2069808cf9e3SIlya Lesokhin 2070808cf9e3SIlya Lesokhin len -= skb->len; 2071808cf9e3SIlya Lesokhin } 2072808cf9e3SIlya Lesokhin 2073808cf9e3SIlya Lesokhin return true; 2074808cf9e3SIlya Lesokhin } 2075808cf9e3SIlya Lesokhin 20765d424d5aSJohn Heffner /* Create a new MTU probe if we are ready. 207767edfef7SAndi Kleen * MTU probe is regularly attempting to increase the path MTU by 207867edfef7SAndi Kleen * deliberately sending larger packets. This discovers routing 207967edfef7SAndi Kleen * changes resulting in larger path MTUs. 
208067edfef7SAndi Kleen *
20815d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available),
20825d424d5aSJohn Heffner * 1 if a probe was sent,
2083056834d9SIlpo Järvinen * -1 otherwise
2084056834d9SIlpo Järvinen */
20855d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
20865d424d5aSJohn Heffner {
20875d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
208812a59abcSEric Dumazet struct tcp_sock *tp = tcp_sk(sk);
20895d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next;
20906b58e0a5SFan Du struct net *net = sock_net(sk);
20915d424d5aSJohn Heffner int probe_size;
209291cc17c0SIlpo Järvinen int size_needed;
209312a59abcSEric Dumazet int copy, len;
20945d424d5aSJohn Heffner int mss_now;
20956b58e0a5SFan Du int interval;
20965d424d5aSJohn Heffner 
20975d424d5aSJohn Heffner /* Not currently probing/verifying,
20985d424d5aSJohn Heffner * not in recovery,
20995d424d5aSJohn Heffner * have enough cwnd, and
210012a59abcSEric Dumazet * not SACKing (the variable headers throw things off)
210112a59abcSEric Dumazet */
210212a59abcSEric Dumazet if (likely(!icsk->icsk_mtup.enabled ||
21035d424d5aSJohn Heffner icsk->icsk_mtup.probe_size ||
21045d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
21055d424d5aSJohn Heffner tp->snd_cwnd < 11 ||
210612a59abcSEric Dumazet tp->rx_opt.num_sacks || tp->rx_opt.dsack))
21075d424d5aSJohn Heffner return -1;
21085d424d5aSJohn Heffner 
21096b58e0a5SFan Du /* Use binary search for probe_size, between tcp_base_mss and the
21106b58e0a5SFan Du * current mss_clamp. If (search_high - search_low) is smaller
21116b58e0a5SFan Du * than a threshold, back off from probing.
21126b58e0a5SFan Du */
21130c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk);
21146b58e0a5SFan Du probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
21156b58e0a5SFan Du icsk->icsk_mtup.search_low) >> 1);
211691cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
21176b58e0a5SFan Du interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
211805cbc0dbSFan Du /* When misfortune happens, we are reprobing actively, and the
211905cbc0dbSFan Du * reprobe timer has expired. We stick with the current probing
212005cbc0dbSFan Du * process by not resetting the search range to its original.
212105cbc0dbSFan Du */
21226b58e0a5SFan Du if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
212305cbc0dbSFan Du interval < net->ipv4.sysctl_tcp_probe_threshold) {
212405cbc0dbSFan Du /* Check whether enough time has elapsed for
212505cbc0dbSFan Du * another round of probing.
212605cbc0dbSFan Du */
212705cbc0dbSFan Du tcp_mtu_check_reprobe(sk);
21285d424d5aSJohn Heffner return -1;
21295d424d5aSJohn Heffner }
21305d424d5aSJohn Heffner 
21315d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */
21327f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed)
21335d424d5aSJohn Heffner return -1;
21345d424d5aSJohn Heffner 
213591cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed)
21365d424d5aSJohn Heffner return -1;
213790840defSIlpo Järvinen if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
21385d424d5aSJohn Heffner return 0;
21395d424d5aSJohn Heffner 
2140d67c58e9SIlpo Järvinen /* Do we need to wait to drain cwnd?
With none in flight, don't stall */ 2141d67c58e9SIlpo Järvinen if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 2142d67c58e9SIlpo Järvinen if (!tcp_packets_in_flight(tp)) 21435d424d5aSJohn Heffner return -1; 21445d424d5aSJohn Heffner else 21455d424d5aSJohn Heffner return 0; 21465d424d5aSJohn Heffner } 21475d424d5aSJohn Heffner 2148808cf9e3SIlya Lesokhin if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) 2149808cf9e3SIlya Lesokhin return -1; 2150808cf9e3SIlya Lesokhin 21515d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */ 2152eb934478SEric Dumazet nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); 215351456b29SIan Morris if (!nskb) 21545d424d5aSJohn Heffner return -1; 21553ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 21563ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 21575d424d5aSJohn Heffner 2158fe067e8aSDavid S. Miller skb = tcp_send_head(sk); 215941477662SJakub Kicinski skb_copy_decrypted(nskb, skb); 21605d424d5aSJohn Heffner 21615d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 21625d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 21634de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 21645d424d5aSJohn Heffner TCP_SKB_CB(nskb)->sacked = 0; 21655d424d5aSJohn Heffner nskb->csum = 0; 216698be9b12SEric Dumazet nskb->ip_summed = CHECKSUM_PARTIAL; 21675d424d5aSJohn Heffner 216850c4817eSIlpo Järvinen tcp_insert_write_queue_before(nskb, skb, sk); 21692b7cda9cSEric Dumazet tcp_highest_sack_replace(sk, skb, nskb); 217050c4817eSIlpo Järvinen 21715d424d5aSJohn Heffner len = 0; 2172234b6860SIlpo Järvinen tcp_for_write_queue_from_safe(skb, next, sk) { 21735d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 21745d424d5aSJohn Heffner skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 21755d424d5aSJohn Heffner 21765d424d5aSJohn Heffner if (skb->len <= copy) { 21775d424d5aSJohn Heffner /* We've eaten all the data from this skb. 21785d424d5aSJohn Heffner * Throw it away. */ 21794de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2180808cf9e3SIlya Lesokhin /* If this is the last SKB we copy and eor is set 2181808cf9e3SIlya Lesokhin * we need to propagate it to the new skb. 2182808cf9e3SIlya Lesokhin */ 2183808cf9e3SIlya Lesokhin TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; 2184888a5c53SWillem de Bruijn tcp_skb_collapse_tstamp(nskb, skb); 2185fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 21863ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 21875d424d5aSJohn Heffner } else { 21884de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 2189a3433f35SChangli Gao ~(TCPHDR_FIN|TCPHDR_PSH); 21905d424d5aSJohn Heffner if (!skb_shinfo(skb)->nr_frags) { 21915d424d5aSJohn Heffner skb_pull(skb, copy); 21925d424d5aSJohn Heffner } else { 21935d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 21945bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 21955d424d5aSJohn Heffner } 21965d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 21975d424d5aSJohn Heffner } 21985d424d5aSJohn Heffner 21995d424d5aSJohn Heffner len += copy; 2200234b6860SIlpo Järvinen 2201234b6860SIlpo Järvinen if (len >= probe_size) 2202234b6860SIlpo Järvinen break; 22035d424d5aSJohn Heffner } 22045bbb432cSEric Dumazet tcp_init_tso_segs(nskb, nskb->len); 22055d424d5aSJohn Heffner 22065d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 22077faee5c0SEric Dumazet * be resegmented into mss-sized pieces by tcp_write_xmit(). 
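/*
 * Editorial sketch, not part of the kernel source: the probe sizing
 * computed earlier in this function.  With search_low = 1024 and
 * search_high = 1500 the probe targets an MTU of (1024 + 1500) / 2 =
 * 1262 bytes, and with tp->reordering at its default of 3 the probe
 * needs size_needed = tcp_mtu_to_mss(sk, 1262) + 4 * tp->mss_cache
 * bytes of queued data and send window before it may be sent.
 */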
22087faee5c0SEric Dumazet */ 22095d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 22105d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 22115d424d5aSJohn Heffner * effectively two packets. */ 22125d424d5aSJohn Heffner tp->snd_cwnd--; 221366f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, nskb); 22145d424d5aSJohn Heffner 22155d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 22160e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 22170e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 22185d424d5aSJohn Heffner 22195d424d5aSJohn Heffner return 1; 22205d424d5aSJohn Heffner } 22215d424d5aSJohn Heffner 22225d424d5aSJohn Heffner return -1; 22235d424d5aSJohn Heffner } 22245d424d5aSJohn Heffner 2225864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk) 2226218af599SEric Dumazet { 2227864e5c09SEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 2228864e5c09SEric Dumazet 2229864e5c09SEric Dumazet if (!tcp_needs_internal_pacing(sk)) 2230864e5c09SEric Dumazet return false; 2231864e5c09SEric Dumazet 2232864e5c09SEric Dumazet if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) 2233864e5c09SEric Dumazet return false; 2234864e5c09SEric Dumazet 2235864e5c09SEric Dumazet if (!hrtimer_is_queued(&tp->pacing_timer)) { 2236864e5c09SEric Dumazet hrtimer_start(&tp->pacing_timer, 2237864e5c09SEric Dumazet ns_to_ktime(tp->tcp_wstamp_ns), 2238864e5c09SEric Dumazet HRTIMER_MODE_ABS_PINNED_SOFT); 2239864e5c09SEric Dumazet sock_hold(sk); 2240864e5c09SEric Dumazet } 2241864e5c09SEric Dumazet return true; 2242218af599SEric Dumazet } 2243218af599SEric Dumazet 2244f9616c35SEric Dumazet /* TCP Small Queues : 2245f9616c35SEric Dumazet * Control number of packets in qdisc/devices to two packets / or ~1 ms. 2246f9616c35SEric Dumazet * (These limits are doubled for retransmits) 2247f9616c35SEric Dumazet * This allows for : 2248f9616c35SEric Dumazet * - better RTT estimation and ACK scheduling 2249f9616c35SEric Dumazet * - faster recovery 2250f9616c35SEric Dumazet * - high rates 2251f9616c35SEric Dumazet * Alas, some drivers / subsystems require a fair amount 2252f9616c35SEric Dumazet * of queued bytes to ensure line rate. 2253f9616c35SEric Dumazet * One example is wifi aggregation (802.11 AMPDU) 2254f9616c35SEric Dumazet */ 2255f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, 2256f9616c35SEric Dumazet unsigned int factor) 2257f9616c35SEric Dumazet { 225876a9ebe8SEric Dumazet unsigned long limit; 2259f9616c35SEric Dumazet 226076a9ebe8SEric Dumazet limit = max_t(unsigned long, 226176a9ebe8SEric Dumazet 2 * skb->truesize, 226276a9ebe8SEric Dumazet sk->sk_pacing_rate >> sk->sk_pacing_shift); 2263c73e5807SEric Dumazet if (sk->sk_pacing_status == SK_PACING_NONE) 226476a9ebe8SEric Dumazet limit = min_t(unsigned long, limit, 22659184d8bbSEric Dumazet sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes); 2266f9616c35SEric Dumazet limit <<= factor; 2267f9616c35SEric Dumazet 2268a842fe14SEric Dumazet if (static_branch_unlikely(&tcp_tx_delay_enabled) && 2269a842fe14SEric Dumazet tcp_sk(sk)->tcp_tx_delay) { 2270a842fe14SEric Dumazet u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay; 2271a842fe14SEric Dumazet 2272a842fe14SEric Dumazet /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we 2273a842fe14SEric Dumazet * approximate our needs assuming an ~100% skb->truesize overhead. 2274a842fe14SEric Dumazet * USEC_PER_SEC is approximated by 2^20. 
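		 * Editor's worked example (assumed numbers): with a
		 * sk_pacing_rate of 125000000 bytes/sec (~1 Gbit/s) and a
		 * tcp_tx_delay of 100 usec, 125000000 * 100 >> 19 yields
		 * ~23 KB of extra allowance -- about twice the ~12.5 KB
		 * actually paced out during the added delay, reflecting the
		 * assumed ~100% truesize overhead.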
2275a842fe14SEric Dumazet * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. 2276a842fe14SEric Dumazet */ 2277a842fe14SEric Dumazet extra_bytes >>= (20 - 1); 2278a842fe14SEric Dumazet limit += extra_bytes; 2279a842fe14SEric Dumazet } 228014afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > limit) { 228175c119afSEric Dumazet /* Always send skb if rtx queue is empty. 228275eefc6cSEric Dumazet * No need to wait for TX completion to call us back, 228375eefc6cSEric Dumazet * after softirq/tasklet schedule. 228475eefc6cSEric Dumazet * This helps when TX completions are delayed too much. 228575eefc6cSEric Dumazet */ 228675c119afSEric Dumazet if (tcp_rtx_queue_empty(sk)) 228775eefc6cSEric Dumazet return false; 228875eefc6cSEric Dumazet 22897aa5470cSEric Dumazet set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 2290f9616c35SEric Dumazet /* It is possible TX completion already happened 2291f9616c35SEric Dumazet * before we set TSQ_THROTTLED, so we must 2292f9616c35SEric Dumazet * test again the condition. 2293f9616c35SEric Dumazet */ 2294f9616c35SEric Dumazet smp_mb__after_atomic(); 229514afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > limit) 2296f9616c35SEric Dumazet return true; 2297f9616c35SEric Dumazet } 2298f9616c35SEric Dumazet return false; 2299f9616c35SEric Dumazet } 2300f9616c35SEric Dumazet 230105b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) 230205b055e8SFrancis Yan { 2303628174ccSEric Dumazet const u32 now = tcp_jiffies32; 2304efe967cdSArnd Bergmann enum tcp_chrono old = tp->chrono_type; 230505b055e8SFrancis Yan 2306efe967cdSArnd Bergmann if (old > TCP_CHRONO_UNSPEC) 2307efe967cdSArnd Bergmann tp->chrono_stat[old - 1] += now - tp->chrono_start; 230805b055e8SFrancis Yan tp->chrono_start = now; 230905b055e8SFrancis Yan tp->chrono_type = new; 231005b055e8SFrancis Yan } 231105b055e8SFrancis Yan 231205b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type) 231305b055e8SFrancis Yan { 231405b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk); 231505b055e8SFrancis Yan 231605b055e8SFrancis Yan /* If there are multiple conditions worthy of tracking in a 23170f87230dSFrancis Yan * chronograph then the highest priority enum takes precedence 23180f87230dSFrancis Yan * over the other conditions. So that if something "more interesting" 231905b055e8SFrancis Yan * starts happening, stop the previous chrono and start a new one. 232005b055e8SFrancis Yan */ 232105b055e8SFrancis Yan if (type > tp->chrono_type) 232205b055e8SFrancis Yan tcp_chrono_set(tp, type); 232305b055e8SFrancis Yan } 232405b055e8SFrancis Yan 232505b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) 232605b055e8SFrancis Yan { 232705b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk); 232805b055e8SFrancis Yan 23290f87230dSFrancis Yan 23300f87230dSFrancis Yan /* There are multiple conditions worthy of tracking in a 23310f87230dSFrancis Yan * chronograph, so that the highest priority enum takes 23320f87230dSFrancis Yan * precedence over the other conditions (see tcp_chrono_start). 23330f87230dSFrancis Yan * If a condition stops, we only stop chrono tracking if 23340f87230dSFrancis Yan * it's the "most interesting" or current chrono we are 23350f87230dSFrancis Yan * tracking and starts busy chrono if we have pending data. 
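	 * Editor's example: if the socket was accounting RWND_LIMITED time
	 * and the receive window opens while unsent data remains queued,
	 * stopping RWND_LIMITED switches accounting to TCP_CHRONO_BUSY
	 * rather than TCP_CHRONO_UNSPEC, so busy time keeps accumulating.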
23360f87230dSFrancis Yan */ 233775c119afSEric Dumazet if (tcp_rtx_and_write_queues_empty(sk)) 233805b055e8SFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); 23390f87230dSFrancis Yan else if (type == tp->chrono_type) 23400f87230dSFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_BUSY); 234105b055e8SFrancis Yan } 234205b055e8SFrancis Yan 23431da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the 23441da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote 23451da177e4SLinus Torvalds * window for us. 23461da177e4SLinus Torvalds * 2347f8269a49SIlpo Järvinen * LARGESEND note: !tcp_urg_mode is overkill, only frames between 2348f8269a49SIlpo Järvinen * snd_up-64k-mss .. snd_up cannot be large. However, taking into 2349f8269a49SIlpo Järvinen * account rare use of URG, this is not a big flaw. 2350f8269a49SIlpo Järvinen * 23516ba8a3b1SNandita Dukkipati * Send at most one packet when push_one > 0. Temporarily ignore 23526ba8a3b1SNandita Dukkipati * cwnd limit to force at most one packet out when push_one == 2. 23536ba8a3b1SNandita Dukkipati 2354a2a385d6SEric Dumazet * Returns true, if no segments are in flight and we have queued segments, 2355a2a385d6SEric Dumazet * but cannot send anything now because of SWS or another problem. 23561da177e4SLinus Torvalds */ 2357a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 2358d5dd9175SIlpo Järvinen int push_one, gfp_t gfp) 23591da177e4SLinus Torvalds { 23601da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 236192df7b51SDavid S. Miller struct sk_buff *skb; 2362c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 2363c1b4a7e6SDavid S. Miller int cwnd_quota; 23645d424d5aSJohn Heffner int result; 23655615f886SFrancis Yan bool is_cwnd_limited = false, is_rwnd_limited = false; 2366605ad7f1SEric Dumazet u32 max_segs; 23671da177e4SLinus Torvalds 2368c1b4a7e6SDavid S. Miller sent_pkts = 0; 23695d424d5aSJohn Heffner 2370ee1836aeSEric Dumazet tcp_mstamp_refresh(tp); 2371d5dd9175SIlpo Järvinen if (!push_one) { 23725d424d5aSJohn Heffner /* Do MTU probing. */ 2373d5dd9175SIlpo Järvinen result = tcp_mtu_probe(sk); 2374d5dd9175SIlpo Järvinen if (!result) { 2375a2a385d6SEric Dumazet return false; 23765d424d5aSJohn Heffner } else if (result > 0) { 23775d424d5aSJohn Heffner sent_pkts = 1; 23785d424d5aSJohn Heffner } 2379d5dd9175SIlpo Järvinen } 23805d424d5aSJohn Heffner 2381ed6e7268SNeal Cardwell max_segs = tcp_tso_segs(sk, mss_now); 2382fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 2383c8ac3774SHerbert Xu unsigned int limit; 2384c8ac3774SHerbert Xu 238579861919SEric Dumazet if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { 238679861919SEric Dumazet /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 238779861919SEric Dumazet skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; 238879861919SEric Dumazet list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2389bf50b606SEric Dumazet tcp_init_tso_segs(skb, mss_now); 239079861919SEric Dumazet goto repair; /* Skip network transmission */ 239179861919SEric Dumazet } 239279861919SEric Dumazet 2393218af599SEric Dumazet if (tcp_pacing_check(sk)) 2394218af599SEric Dumazet break; 2395218af599SEric Dumazet 23965bbb432cSEric Dumazet tso_segs = tcp_init_tso_segs(skb, mss_now); 2397c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 2398c1b4a7e6SDavid S. 
Miller 2399b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 24006ba8a3b1SNandita Dukkipati if (!cwnd_quota) { 24016ba8a3b1SNandita Dukkipati if (push_one == 2) 24026ba8a3b1SNandita Dukkipati /* Force out a loss probe pkt. */ 24036ba8a3b1SNandita Dukkipati cwnd_quota = 1; 24046ba8a3b1SNandita Dukkipati else 2405b68e9f85SHerbert Xu break; 24066ba8a3b1SNandita Dukkipati } 2407b68e9f85SHerbert Xu 24085615f886SFrancis Yan if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { 24095615f886SFrancis Yan is_rwnd_limited = true; 2410b68e9f85SHerbert Xu break; 24115615f886SFrancis Yan } 2412b68e9f85SHerbert Xu 2413d6a4e26aSEric Dumazet if (tso_segs == 1) { 2414aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2415aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 2416aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 2417aa93466bSDavid S. Miller break; 2418c1b4a7e6SDavid S. Miller } else { 2419ca8a2263SNeal Cardwell if (!push_one && 2420605ad7f1SEric Dumazet tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 2421f9bfe4e6SEric Dumazet &is_rwnd_limited, max_segs)) 2422aa93466bSDavid S. Miller break; 2423c1b4a7e6SDavid S. Miller } 2424aa93466bSDavid S. Miller 2425605ad7f1SEric Dumazet limit = mss_now; 2426d6a4e26aSEric Dumazet if (tso_segs > 1 && !tcp_urg_mode(tp)) 2427605ad7f1SEric Dumazet limit = tcp_mss_split_point(sk, skb, mss_now, 2428605ad7f1SEric Dumazet min_t(unsigned int, 2429605ad7f1SEric Dumazet cwnd_quota, 2430605ad7f1SEric Dumazet max_segs), 2431605ad7f1SEric Dumazet nonagle); 2432605ad7f1SEric Dumazet 2433605ad7f1SEric Dumazet if (skb->len > limit && 243456483341SEric Dumazet unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2435605ad7f1SEric Dumazet break; 2436605ad7f1SEric Dumazet 2437f9616c35SEric Dumazet if (tcp_small_queue_check(sk, skb, 0)) 243846d3ceabSEric Dumazet break; 2439c9eeec26SEric Dumazet 2440d5dd9175SIlpo Järvinen if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 24411da177e4SLinus Torvalds break; 24421da177e4SLinus Torvalds 2443ec342325SAndrew Vagin repair: 24441da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 24451da177e4SLinus Torvalds * This call will increment packets_out. 24461da177e4SLinus Torvalds */ 244766f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 2450a262f0cdSNandita Dukkipati sent_pkts += tcp_skb_pcount(skb); 2451d5dd9175SIlpo Järvinen 2452d5dd9175SIlpo Järvinen if (push_one) 2453d5dd9175SIlpo Järvinen break; 24541da177e4SLinus Torvalds } 24551da177e4SLinus Torvalds 24565615f886SFrancis Yan if (is_rwnd_limited) 24575615f886SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); 24585615f886SFrancis Yan else 24595615f886SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 24605615f886SFrancis Yan 2461aa93466bSDavid S. Miller if (likely(sent_pkts)) { 2462684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 2463684bad11SYuchung Cheng tp->prr_out += sent_pkts; 24646ba8a3b1SNandita Dukkipati 24656ba8a3b1SNandita Dukkipati /* Send one loss probe per tail loss episode. 
*/ 24666ba8a3b1SNandita Dukkipati if (push_one != 2) 2467ed66dfafSNeal Cardwell tcp_schedule_loss_probe(sk, false); 2468d2e1339fSBendik Rønning Opstad is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2469ca8a2263SNeal Cardwell tcp_cwnd_validate(sk, is_cwnd_limited); 2470a2a385d6SEric Dumazet return false; 24711da177e4SLinus Torvalds } 247275c119afSEric Dumazet return !tp->packets_out && !tcp_write_queue_empty(sk); 24736ba8a3b1SNandita Dukkipati } 24746ba8a3b1SNandita Dukkipati 2475ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) 24766ba8a3b1SNandita Dukkipati { 24776ba8a3b1SNandita Dukkipati struct inet_connection_sock *icsk = inet_csk(sk); 24786ba8a3b1SNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 2479a2815817SNeal Cardwell u32 timeout, rto_delta_us; 24802ae21cf5SEric Dumazet int early_retrans; 24816ba8a3b1SNandita Dukkipati 24826ba8a3b1SNandita Dukkipati /* Don't do any loss probe on a Fast Open connection before 3WHS 24836ba8a3b1SNandita Dukkipati * finishes. 24846ba8a3b1SNandita Dukkipati */ 2485d983ea6fSEric Dumazet if (rcu_access_pointer(tp->fastopen_rsk)) 24866ba8a3b1SNandita Dukkipati return false; 24876ba8a3b1SNandita Dukkipati 24882ae21cf5SEric Dumazet early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans; 24896ba8a3b1SNandita Dukkipati /* Schedule a loss probe in 2*RTT for SACK capable connections 2490b4f70c3dSNeal Cardwell * not in loss recovery, that are either limited by cwnd or application. 24916ba8a3b1SNandita Dukkipati */ 24922ae21cf5SEric Dumazet if ((early_retrans != 3 && early_retrans != 4) || 2493bec41a11SYuchung Cheng !tp->packets_out || !tcp_is_sack(tp) || 2494b4f70c3dSNeal Cardwell (icsk->icsk_ca_state != TCP_CA_Open && 2495b4f70c3dSNeal Cardwell icsk->icsk_ca_state != TCP_CA_CWR)) 24966ba8a3b1SNandita Dukkipati return false; 24976ba8a3b1SNandita Dukkipati 2498bb4d991aSYuchung Cheng /* Probe timeout is 2*rtt. Add minimum RTO to account 2499f9b99582SYuchung Cheng * for delayed ack when there's one outstanding packet. If no RTT 2500f9b99582SYuchung Cheng * sample is available then probe after TCP_TIMEOUT_INIT. 25016ba8a3b1SNandita Dukkipati */ 2502bb4d991aSYuchung Cheng if (tp->srtt_us) { 2503bb4d991aSYuchung Cheng timeout = usecs_to_jiffies(tp->srtt_us >> 2); 25046ba8a3b1SNandita Dukkipati if (tp->packets_out == 1) 2505bb4d991aSYuchung Cheng timeout += TCP_RTO_MIN; 2506bb4d991aSYuchung Cheng else 2507bb4d991aSYuchung Cheng timeout += TCP_TIMEOUT_MIN; 2508bb4d991aSYuchung Cheng } else { 2509bb4d991aSYuchung Cheng timeout = TCP_TIMEOUT_INIT; 2510bb4d991aSYuchung Cheng } 25116ba8a3b1SNandita Dukkipati 2512a2815817SNeal Cardwell /* If the RTO formula yields an earlier time, then use that time. */ 2513ed66dfafSNeal Cardwell rto_delta_us = advancing_rto ? 2514ed66dfafSNeal Cardwell jiffies_to_usecs(inet_csk(sk)->icsk_rto) : 2515ed66dfafSNeal Cardwell tcp_rto_delta_us(sk); /* How far in future is RTO? */ 2516a2815817SNeal Cardwell if (rto_delta_us > 0) 2517a2815817SNeal Cardwell timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); 25186ba8a3b1SNandita Dukkipati 25193f80e08fSEric Dumazet tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 25203f80e08fSEric Dumazet TCP_RTO_MAX, NULL); 25216ba8a3b1SNandita Dukkipati return true; 25226ba8a3b1SNandita Dukkipati } 25236ba8a3b1SNandita Dukkipati 25241f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of 25251f3279aeSEric Dumazet * a packet is still in a qdisc or driver queue. 
25261f3279aeSEric Dumazet  * In this case, there is very little point doing a retransmit !
25271f3279aeSEric Dumazet  */
25281f3279aeSEric Dumazet static bool skb_still_in_host_queue(const struct sock *sk,
25291f3279aeSEric Dumazet 				    const struct sk_buff *skb)
25301f3279aeSEric Dumazet {
253139bb5e62SEric Dumazet 	if (unlikely(skb_fclone_busy(sk, skb))) {
2532c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk),
25331f3279aeSEric Dumazet 			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
25341f3279aeSEric Dumazet 		return true;
25351f3279aeSEric Dumazet 	}
25361f3279aeSEric Dumazet 	return false;
25371f3279aeSEric Dumazet }
25381f3279aeSEric Dumazet 
2539b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
25406ba8a3b1SNandita Dukkipati  * retransmit the last segment.
25416ba8a3b1SNandita Dukkipati  */
25426ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
25436ba8a3b1SNandita Dukkipati {
25449b717a8dSNandita Dukkipati 	struct tcp_sock *tp = tcp_sk(sk);
25456ba8a3b1SNandita Dukkipati 	struct sk_buff *skb;
25466ba8a3b1SNandita Dukkipati 	int pcount;
25476ba8a3b1SNandita Dukkipati 	int mss = tcp_current_mss(sk);
25486ba8a3b1SNandita Dukkipati 
2549b340b264SYuchung Cheng 	skb = tcp_send_head(sk);
255075c119afSEric Dumazet 	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2551b340b264SYuchung Cheng 		pcount = tp->packets_out;
2552b340b264SYuchung Cheng 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2553b340b264SYuchung Cheng 		if (tp->packets_out > pcount)
2554b340b264SYuchung Cheng 			goto probe_sent;
25556ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25566ba8a3b1SNandita Dukkipati 	}
255775c119afSEric Dumazet 	skb = skb_rb_last(&sk->tcp_rtx_queue);
2558b2b7af86SYuchung Cheng 	if (unlikely(!skb)) {
2559b2b7af86SYuchung Cheng 		WARN_ONCE(tp->packets_out,
2560b2b7af86SYuchung Cheng 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2561b2b7af86SYuchung Cheng 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2562b2b7af86SYuchung Cheng 		inet_csk(sk)->icsk_pending = 0;
2563b2b7af86SYuchung Cheng 		return;
2564b2b7af86SYuchung Cheng 	}
25656ba8a3b1SNandita Dukkipati 
25669b717a8dSNandita Dukkipati 	/* At most one outstanding TLP retransmission. */
25679b717a8dSNandita Dukkipati 	if (tp->tlp_high_seq)
25689b717a8dSNandita Dukkipati 		goto rearm_timer;
25699b717a8dSNandita Dukkipati 
25701f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
25711f3279aeSEric Dumazet 		goto rearm_timer;
25721f3279aeSEric Dumazet 
25736ba8a3b1SNandita Dukkipati 	pcount = tcp_skb_pcount(skb);
25746ba8a3b1SNandita Dukkipati 	if (WARN_ON(!pcount))
25756ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25766ba8a3b1SNandita Dukkipati 
25776ba8a3b1SNandita Dukkipati 	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
257875c119afSEric Dumazet 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
257975c119afSEric Dumazet 					  (pcount - 1) * mss, mss,
25806cc55e09SOctavian Purdila 					  GFP_ATOMIC)))
25816ba8a3b1SNandita Dukkipati 			goto rearm_timer;
258275c119afSEric Dumazet 		skb = skb_rb_next(skb);
25836ba8a3b1SNandita Dukkipati 	}
25846ba8a3b1SNandita Dukkipati 
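	/* Editor's illustration (assumed numbers): if the last skb holds
	 * pcount == 3 segments with mss == 1448 and len == 4000, the
	 * tcp_fragment() call above splits it at (3 - 1) * 1448 = 2896,
	 * so the probe retransmits only the trailing 1104-byte segment.
	 */
25856ba8a3b1SNandita Dukkipati 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
25866ba8a3b1SNandita Dukkipati 		goto rearm_timer;
25876ba8a3b1SNandita Dukkipati 
258810d3be56SEric Dumazet 	if (__tcp_retransmit_skb(sk, skb, 1))
2589b340b264SYuchung Cheng 		goto rearm_timer;
25906ba8a3b1SNandita Dukkipati 
25919b717a8dSNandita Dukkipati 	/* Record snd_nxt for loss detection.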
*/ 25929b717a8dSNandita Dukkipati tp->tlp_high_seq = tp->snd_nxt; 25939b717a8dSNandita Dukkipati 2594b340b264SYuchung Cheng probe_sent: 2595c10d9310SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); 2596fcd16c0aSYuchung Cheng /* Reset s.t. tcp_rearm_rto will restart timer from now */ 2597fcd16c0aSYuchung Cheng inet_csk(sk)->icsk_pending = 0; 2598b340b264SYuchung Cheng rearm_timer: 2599fcd16c0aSYuchung Cheng tcp_rearm_rto(sk); 26001da177e4SLinus Torvalds } 26011da177e4SLinus Torvalds 2602a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 2603a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 2604a762a980SDavid S. Miller * The socket must be locked by the caller. 2605a762a980SDavid S. Miller */ 26069e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 26079e412ba7SIlpo Järvinen int nonagle) 2608a762a980SDavid S. Miller { 2609726e07a8SIlpo Järvinen /* If we are closed, the bytes will have to remain here. 2610726e07a8SIlpo Järvinen * In time closedown will finish, we empty the write queue and 2611726e07a8SIlpo Järvinen * all will be happy. 2612726e07a8SIlpo Järvinen */ 2613726e07a8SIlpo Järvinen if (unlikely(sk->sk_state == TCP_CLOSE)) 2614726e07a8SIlpo Järvinen return; 2615726e07a8SIlpo Järvinen 261699a1dec7SMel Gorman if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 26177450aaf6SEric Dumazet sk_gfp_mask(sk, GFP_ATOMIC))) 26189e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 2619a762a980SDavid S. Miller } 2620a762a980SDavid S. Miller 2621c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 2622c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 2623c1b4a7e6SDavid S. Miller */ 2624c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 2625c1b4a7e6SDavid S. Miller { 2626fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 2627c1b4a7e6SDavid S. Miller 2628c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 2629c1b4a7e6SDavid S. Miller 2630d5dd9175SIlpo Järvinen tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2631c1b4a7e6SDavid S. Miller } 2632c1b4a7e6SDavid S. Miller 26331da177e4SLinus Torvalds /* This function returns the amount that we can raise the 26341da177e4SLinus Torvalds * usable window based on the following constraints 26351da177e4SLinus Torvalds * 26361da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 26371da177e4SLinus Torvalds * 2. We limit memory per socket 26381da177e4SLinus Torvalds * 26391da177e4SLinus Torvalds * RFC 1122: 26401da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 26411da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 26421da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 26431da177e4SLinus Torvalds * 26441da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 26451da177e4SLinus Torvalds * it at least MSS bytes. 26461da177e4SLinus Torvalds * 26471da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 26481da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 26491da177e4SLinus Torvalds * 26501da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 26511da177e4SLinus Torvalds * side SWS prevention criteria. 
The problem is that under this rule 26521da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 26531da177e4SLinus Torvalds * window to always advance by a single byte. 26541da177e4SLinus Torvalds * 26551da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 26561da177e4SLinus Torvalds * then this will not be a problem. 26571da177e4SLinus Torvalds * 26581da177e4SLinus Torvalds * BSD seems to make the following compromise: 26591da177e4SLinus Torvalds * 26601da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 26611da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 26621da177e4SLinus Torvalds * then set the window to 0. 26631da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 26641da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 26651da177e4SLinus Torvalds * and from being larger than the largest representable value. 26661da177e4SLinus Torvalds * 26671da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 26681da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 26691da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 26701da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 26711da177e4SLinus Torvalds * because the pipeline is full. 26721da177e4SLinus Torvalds * 26731da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 26741da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 26751da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 26761da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 26771da177e4SLinus Torvalds * of having a fixed window size at almost all times. 26781da177e4SLinus Torvalds * 26791da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 26801da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 26811da177e4SLinus Torvalds * 26821da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 26831da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 26841da177e4SLinus Torvalds */ 26851da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 26861da177e4SLinus Torvalds { 2687463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 26881da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2689caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 26901da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 26911da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 26921da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 26931da177e4SLinus Torvalds * fluctuations. 
--SAW 1998/11/1
26941da177e4SLinus Torvalds 	 */
2695463c84b9SArnaldo Carvalho de Melo 	int mss = icsk->icsk_ack.rcv_mss;
26961da177e4SLinus Torvalds 	int free_space = tcp_space(sk);
269786c1a045SFlorian Westphal 	int allowed_space = tcp_full_space(sk);
269886c1a045SFlorian Westphal 	int full_space = min_t(int, tp->window_clamp, allowed_space);
26991da177e4SLinus Torvalds 	int window;
27001da177e4SLinus Torvalds 
270106425c30SEric Dumazet 	if (unlikely(mss > full_space)) {
27021da177e4SLinus Torvalds 		mss = full_space;
270306425c30SEric Dumazet 		if (mss <= 0)
270406425c30SEric Dumazet 			return 0;
270506425c30SEric Dumazet 	}
2706b92edbe0SEric Dumazet 	if (free_space < (full_space >> 1)) {
2707463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_ack.quick = 0;
27081da177e4SLinus Torvalds 
2709b8da51ebSEric Dumazet 		if (tcp_under_memory_pressure(sk))
2710056834d9SIlpo Järvinen 			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2711056834d9SIlpo Järvinen 					       4U * tp->advmss);
27121da177e4SLinus Torvalds 
271386c1a045SFlorian Westphal 		/* free_space might become our new window, make sure we don't
271486c1a045SFlorian Westphal 		 * increase it due to wscale.
271586c1a045SFlorian Westphal 		 */
271686c1a045SFlorian Westphal 		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
271786c1a045SFlorian Westphal 
271886c1a045SFlorian Westphal 		/* if free space is less than mss estimate, or is below 1/16th
271986c1a045SFlorian Westphal 		 * of the maximum allowed, try to move to zero-window, else
272086c1a045SFlorian Westphal 		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
272186c1a045SFlorian Westphal 		 * new incoming data is dropped due to memory limits.
272286c1a045SFlorian Westphal 		 * With large window, mss test triggers way too late in order
272386c1a045SFlorian Westphal 		 * to announce zero window in time before rmem limit kicks in.
272486c1a045SFlorian Westphal 		 */
272586c1a045SFlorian Westphal 		if (free_space < (allowed_space >> 4) || free_space < mss)
27261da177e4SLinus Torvalds 			return 0;
27271da177e4SLinus Torvalds 	}
27281da177e4SLinus Torvalds 
27291da177e4SLinus Torvalds 	if (free_space > tp->rcv_ssthresh)
27301da177e4SLinus Torvalds 		free_space = tp->rcv_ssthresh;
27311da177e4SLinus Torvalds 
27321da177e4SLinus Torvalds 	/* Don't do rounding if we are using window scaling, since the
27331da177e4SLinus Torvalds 	 * scaled window will not line up with the MSS boundary anyway.
27341da177e4SLinus Torvalds 	 */
27351da177e4SLinus Torvalds 	if (tp->rx_opt.rcv_wscale) {
27361da177e4SLinus Torvalds 		window = free_space;
27371da177e4SLinus Torvalds 
27381da177e4SLinus Torvalds 		/* Advertise enough space so that it won't get scaled away.
27391da177e4SLinus Torvalds 		 * Important case: prevent zero window announcement if
27401da177e4SLinus Torvalds 		 * 1<<rcv_wscale > mss.
27411da177e4SLinus Torvalds 		 */
27421935299dSGao Feng 		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
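		/* Editor's worked example (assumed numbers): with
		 * rcv_wscale == 7 the advertised field is window >> 7, in
		 * 128-byte units. For free_space == 1000, ALIGN() rounds up
		 * to 1024 and we advertise 8 units; a plain shift would
		 * advertise 7 (896 bytes), and would announce a zero window
		 * whenever free_space < 128.
		 */
27431da177e4SLinus Torvalds 	} else {
27441935299dSGao Feng 		window = tp->rcv_wnd;
27451da177e4SLinus Torvalds 		/* Get the largest window that is a nice multiple of mss.
27461da177e4SLinus Torvalds 		 * Window clamp already applied above.
27471da177e4SLinus Torvalds 		 * If our current window offering is within 1 mss of the
27481da177e4SLinus Torvalds 		 * free space we just keep it. This prevents the divide
27491da177e4SLinus Torvalds 		 * and multiply from happening most of the time.
27501da177e4SLinus Torvalds 		 * We also don't do any window rounding when the free space
27511da177e4SLinus Torvalds 		 * is too small.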
27521da177e4SLinus Torvalds */ 27531da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 27541935299dSGao Feng window = rounddown(free_space, mss); 275584565070SJohn Heffner else if (mss == full_space && 2756b92edbe0SEric Dumazet free_space > window + (full_space >> 1)) 275784565070SJohn Heffner window = free_space; 27581da177e4SLinus Torvalds } 27591da177e4SLinus Torvalds 27601da177e4SLinus Torvalds return window; 27611da177e4SLinus Torvalds } 27621da177e4SLinus Torvalds 2763cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb, 2764082ac2d5SMartin KaFai Lau const struct sk_buff *next_skb) 2765082ac2d5SMartin KaFai Lau { 27660a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(next_skb))) { 27670a2cf20cSSoheil Hassas Yeganeh const struct skb_shared_info *next_shinfo = 27680a2cf20cSSoheil Hassas Yeganeh skb_shinfo(next_skb); 2769082ac2d5SMartin KaFai Lau struct skb_shared_info *shinfo = skb_shinfo(skb); 2770082ac2d5SMartin KaFai Lau 27710a2cf20cSSoheil Hassas Yeganeh shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 2772082ac2d5SMartin KaFai Lau shinfo->tskey = next_shinfo->tskey; 27732de8023eSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack |= 27742de8023eSMartin KaFai Lau TCP_SKB_CB(next_skb)->txstamp_ack; 2775082ac2d5SMartin KaFai Lau } 2776082ac2d5SMartin KaFai Lau } 2777082ac2d5SMartin KaFai Lau 27784a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */ 2779f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 27801da177e4SLinus Torvalds { 27811da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 278275c119afSEric Dumazet struct sk_buff *next_skb = skb_rb_next(skb); 278313dde04fSWei Yongjun int next_skb_size; 27841da177e4SLinus Torvalds 2785058dc334SIlpo Järvinen next_skb_size = next_skb->len; 27861da177e4SLinus Torvalds 2787058dc334SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 27881da177e4SLinus Torvalds 2789f8071cdeSEric Dumazet if (next_skb_size) { 2790f8071cdeSEric Dumazet if (next_skb_size <= skb_availroom(skb)) 2791f8071cdeSEric Dumazet skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size), 2792f8071cdeSEric Dumazet next_skb_size); 27933b4929f6SEric Dumazet else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size)) 2794f8071cdeSEric Dumazet return false; 2795f8071cdeSEric Dumazet } 27962b7cda9cSEric Dumazet tcp_highest_sack_replace(sk, next_skb, skb); 2797a6963a6bSIlpo Järvinen 27981da177e4SLinus Torvalds /* Update sequence range on original skb. */ 27991da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 28001da177e4SLinus Torvalds 2801e6c7d085SIlpo Järvinen /* Merge over control information. This moves PSH/FIN etc. over */ 28024de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 28031da177e4SLinus Torvalds 28041da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 28051da177e4SLinus Torvalds * packet counting does not break. 
28061da177e4SLinus Torvalds 	 */
28074828e7f4SIlpo Järvinen 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2808a643b5d4SMartin KaFai Lau 	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2809b7689205SIlpo Järvinen 
2810b7689205SIlpo Järvinen 	/* changed transmit queue under us so clear hints */
2811ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
2812ef9da47cSIlpo Järvinen 	if (next_skb == tp->retransmit_skb_hint)
2813ef9da47cSIlpo Järvinen 		tp->retransmit_skb_hint = skb;
2814b7689205SIlpo Järvinen 
2815797108d1SIlpo Järvinen 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2816797108d1SIlpo Järvinen 
2817082ac2d5SMartin KaFai Lau 	tcp_skb_collapse_tstamp(skb, next_skb);
2818082ac2d5SMartin KaFai Lau 
281975c119afSEric Dumazet 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
2820f8071cdeSEric Dumazet 	return true;
28211da177e4SLinus Torvalds }
28221da177e4SLinus Torvalds 
282367edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
2824a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
28254a17fc3aSIlpo Järvinen {
28264a17fc3aSIlpo Järvinen 	if (tcp_skb_pcount(skb) > 1)
2827a2a385d6SEric Dumazet 		return false;
28284a17fc3aSIlpo Järvinen 	if (skb_cloned(skb))
2829a2a385d6SEric Dumazet 		return false;
28302331ccc5SEric Dumazet 	/* Some heuristics for collapsing over SACK'd could be invented */
28314a17fc3aSIlpo Järvinen 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2832a2a385d6SEric Dumazet 		return false;
28334a17fc3aSIlpo Järvinen 
2834a2a385d6SEric Dumazet 	return true;
28354a17fc3aSIlpo Järvinen }
28364a17fc3aSIlpo Järvinen 
283767edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create fewer packets
283867edfef7SAndi Kleen  * on the wire. This is only done on retransmission.
283967edfef7SAndi Kleen  */
28404a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
28414a17fc3aSIlpo Järvinen 				     int space)
28424a17fc3aSIlpo Järvinen {
28434a17fc3aSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
28444a17fc3aSIlpo Järvinen 	struct sk_buff *skb = to, *tmp;
2845a2a385d6SEric Dumazet 	bool first = true;
28464a17fc3aSIlpo Järvinen 
2847e0a1e5b5SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
28484a17fc3aSIlpo Järvinen 		return;
28494de075e0SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
28504a17fc3aSIlpo Järvinen 		return;
28514a17fc3aSIlpo Järvinen 
285275c119afSEric Dumazet 	skb_rbtree_walk_from_safe(skb, tmp) {
28534a17fc3aSIlpo Järvinen 		if (!tcp_can_collapse(sk, skb))
28544a17fc3aSIlpo Järvinen 			break;
28554a17fc3aSIlpo Järvinen 
2856a643b5d4SMartin KaFai Lau 		if (!tcp_skb_can_collapse_to(to))
2857a643b5d4SMartin KaFai Lau 			break;
2858a643b5d4SMartin KaFai Lau 
28594a17fc3aSIlpo Järvinen 		space -= skb->len;
28604a17fc3aSIlpo Järvinen 
28614a17fc3aSIlpo Järvinen 		if (first) {
2862a2a385d6SEric Dumazet 			first = false;
28634a17fc3aSIlpo Järvinen 			continue;
28644a17fc3aSIlpo Järvinen 		}
28654a17fc3aSIlpo Järvinen 
28664a17fc3aSIlpo Järvinen 		if (space < 0)
28674a17fc3aSIlpo Järvinen 			break;
28684a17fc3aSIlpo Järvinen 
28694a17fc3aSIlpo Järvinen 		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
28704a17fc3aSIlpo Järvinen 			break;
28714a17fc3aSIlpo Järvinen 
2872f8071cdeSEric Dumazet 		if (!tcp_collapse_retrans(sk, to))
2873f8071cdeSEric Dumazet 			break;
28744a17fc3aSIlpo Järvinen 	}
28754a17fc3aSIlpo Järvinen }
28764a17fc3aSIlpo Järvinen 
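/* Editor's sketch (illustration only, not kernel code): a plain-C model of
 * the budget logic in tcp_retrans_try_collapse() above, with made-up sizes.
 * "space" is the cur_mss budget passed in by the retransmit path.
 */
static int tcp_collapse_budget_example(void)
{
	int space = 1448;		/* collapse budget (cur_mss) */
	int lens[] = { 600, 500, 400 };	/* head skb plus two followers */
	int i, collapsed = 0;

	for (i = 0; i < 3; i++) {
		space -= lens[i];
		if (i == 0)
			continue;	/* head skb only consumes budget */
		if (space < 0)
			break;		/* the 400-byte skb would overflow */
		collapsed++;		/* follower merged into the head */
	}
	return collapsed;		/* 1: only the 500-byte skb merges */
}

28771da177e4SLinus Torvalds /* This retransmits one SKB.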
Policy decisions and retransmit queue
28781da177e4SLinus Torvalds  * state updates are done by the caller. Returns non-zero if an
28791da177e4SLinus Torvalds  * error occurred which prevented the send.
28801da177e4SLinus Torvalds  */
288110d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
28821da177e4SLinus Torvalds {
28835d424d5aSJohn Heffner 	struct inet_connection_sock *icsk = inet_csk(sk);
288410d3be56SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
28857d227cd2SSridhar Samudrala 	unsigned int cur_mss;
288610d3be56SEric Dumazet 	int diff, len, err;
28871da177e4SLinus Torvalds 
288810d3be56SEric Dumazet 
288910d3be56SEric Dumazet 	/* Inconclusive MTU probe */
289010d3be56SEric Dumazet 	if (icsk->icsk_mtup.probe_size)
28915d424d5aSJohn Heffner 		icsk->icsk_mtup.probe_size = 0;
28925d424d5aSJohn Heffner 
28931da177e4SLinus Torvalds 	/* Do not send more than we queued. 1/4 is reserved for possible
2894caa20d9aSStephen Hemminger 	 * copying overhead: fragmentation, tunneling, mangling etc.
28951da177e4SLinus Torvalds 	 */
289614afee4bSReshetova, Elena 	if (refcount_read(&sk->sk_wmem_alloc) >
2897ffb4d6c8SEric Dumazet 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2898ffb4d6c8SEric Dumazet 		  sk->sk_sndbuf))
28991da177e4SLinus Torvalds 		return -EAGAIN;
29001da177e4SLinus Torvalds 
29011f3279aeSEric Dumazet 	if (skb_still_in_host_queue(sk, skb))
29021f3279aeSEric Dumazet 		return -EBUSY;
29031f3279aeSEric Dumazet 
29041da177e4SLinus Torvalds 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
29057f582b24SEric Dumazet 		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
29067f582b24SEric Dumazet 			WARN_ON_ONCE(1);
29077f582b24SEric Dumazet 			return -EINVAL;
29087f582b24SEric Dumazet 		}
29091da177e4SLinus Torvalds 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
29101da177e4SLinus Torvalds 			return -ENOMEM;
29111da177e4SLinus Torvalds 	}
29121da177e4SLinus Torvalds 
29137d227cd2SSridhar Samudrala 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
29147d227cd2SSridhar Samudrala 		return -EHOSTUNREACH; /* Routing failure or similar. */
29157d227cd2SSridhar Samudrala 
29160c54b85fSIlpo Järvinen 	cur_mss = tcp_current_mss(sk);
29177d227cd2SSridhar Samudrala 
29181da177e4SLinus Torvalds 	/* If receiver has shrunk his window, and skb is out of
29191da177e4SLinus Torvalds 	 * new window, do not retransmit it. The exception is the
29201da177e4SLinus Torvalds 	 * case, when window is shrunk to zero. In this case
29211da177e4SLinus Torvalds 	 * our retransmit serves as a zero window probe.
29221da177e4SLinus Torvalds 	 */
29239d4fb27dSJoe Perches 	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
29249d4fb27dSJoe Perches 	    TCP_SKB_CB(skb)->seq != tp->snd_una)
29251da177e4SLinus Torvalds 		return -EAGAIN;
29261da177e4SLinus Torvalds 
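	/* Editor's illustration (assumed numbers): with segs == 2 and
	 * cur_mss == 1448, len below is 2896; a 4000-byte skb is split so
	 * this retransmit carries exactly two full-sized segments and the
	 * remaining 1104 bytes stay queued for a later attempt.
	 */
292710d3be56SEric Dumazet 	len = cur_mss * segs;
292810d3be56SEric Dumazet 	if (skb->len > len) {
292975c119afSEric Dumazet 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
293075c119afSEric Dumazet 				 cur_mss, GFP_ATOMIC))
29311da177e4SLinus Torvalds 			return -ENOMEM; /* We'll try again later.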
*/ 293202276f3cSIlpo Järvinen } else { 2933c52e2421SEric Dumazet if (skb_unclone(skb, GFP_ATOMIC)) 2934c52e2421SEric Dumazet return -ENOMEM; 293510d3be56SEric Dumazet 293610d3be56SEric Dumazet diff = tcp_skb_pcount(skb); 293710d3be56SEric Dumazet tcp_set_skb_tso_segs(skb, cur_mss); 293810d3be56SEric Dumazet diff -= tcp_skb_pcount(skb); 293910d3be56SEric Dumazet if (diff) 294010d3be56SEric Dumazet tcp_adjust_pcount(sk, skb, diff); 294110d3be56SEric Dumazet if (skb->len < cur_mss) 294210d3be56SEric Dumazet tcp_retrans_try_collapse(sk, skb, cur_mss); 29431da177e4SLinus Torvalds } 29441da177e4SLinus Torvalds 294549213555SDaniel Borkmann /* RFC3168, section 6.1.1.1. ECN fallback */ 294649213555SDaniel Borkmann if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) 294749213555SDaniel Borkmann tcp_ecn_clear_syn(sk, skb); 294849213555SDaniel Borkmann 2949678550c6SYuchung Cheng /* Update global and local TCP statistics. */ 2950678550c6SYuchung Cheng segs = tcp_skb_pcount(skb); 2951678550c6SYuchung Cheng TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); 2952678550c6SYuchung Cheng if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2953678550c6SYuchung Cheng __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 2954678550c6SYuchung Cheng tp->total_retrans += segs; 2955fb31c9b9SWei Wang tp->bytes_retrans += skb->len; 2956678550c6SYuchung Cheng 295750bceae9SThomas Graf /* make sure skb->data is aligned on arches that require it 295850bceae9SThomas Graf * and check if ack-trimming & collapsing extended the headroom 295950bceae9SThomas Graf * beyond what csum_start can cover. 296050bceae9SThomas Graf */ 296150bceae9SThomas Graf if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 296250bceae9SThomas Graf skb_headroom(skb) >= 0xFFFF)) { 296310a81980SEric Dumazet struct sk_buff *nskb; 296410a81980SEric Dumazet 2965e2080072SEric Dumazet tcp_skb_tsorted_save(skb) { 296610a81980SEric Dumazet nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2967c84a5711SYuchung Cheng err = nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2968117632e6SEric Dumazet -ENOBUFS; 2969e2080072SEric Dumazet } tcp_skb_tsorted_restore(skb); 2970e2080072SEric Dumazet 29715889e2c0SYousuk Seung if (!err) { 2972a7a25630SEric Dumazet tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); 29735889e2c0SYousuk Seung tcp_rate_skb_sent(sk, skb); 29745889e2c0SYousuk Seung } 2975117632e6SEric Dumazet } else { 2976c84a5711SYuchung Cheng err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2977117632e6SEric Dumazet } 2978c84a5711SYuchung Cheng 29797f12422cSYuchung Cheng /* To avoid taking spuriously low RTT samples based on a timestamp 29807f12422cSYuchung Cheng * for a transmit that never happened, always mark EVER_RETRANS 29817f12422cSYuchung Cheng */ 29827f12422cSYuchung Cheng TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 29837f12422cSYuchung Cheng 2984a31ad29eSLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) 2985a31ad29eSLawrence Brakmo tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, 2986a31ad29eSLawrence Brakmo TCP_SKB_CB(skb)->seq, segs, err); 2987a31ad29eSLawrence Brakmo 2988fc9f3501SEric Dumazet if (likely(!err)) { 2989e086101bSCong Wang trace_tcp_retransmit_skb(sk, skb); 2990678550c6SYuchung Cheng } else if (err != -EBUSY) { 2991ec641b39SYuchung Cheng NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); 2992fc9f3501SEric Dumazet } 2993c84a5711SYuchung Cheng return err; 299493b174adSYuchung Cheng } 299593b174adSYuchung Cheng 299610d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 299793b174adSYuchung Cheng { 299893b174adSYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 299910d3be56SEric Dumazet int err = __tcp_retransmit_skb(sk, skb, segs); 30001da177e4SLinus Torvalds 30011da177e4SLinus Torvalds if (err == 0) { 30021da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 30031da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 3004e87cc472SJoe Perches net_dbg_ratelimited("retrans_out leaked\n"); 30051da177e4SLinus Torvalds } 30061da177e4SLinus Torvalds #endif 30071da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 30081da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 30097ae18975SYuchung Cheng } 30101da177e4SLinus Torvalds 30117ae18975SYuchung Cheng /* Save stamp of the first (attempted) retransmit. */ 30121da177e4SLinus Torvalds if (!tp->retrans_stamp) 30137faee5c0SEric Dumazet tp->retrans_stamp = tcp_skb_timestamp(skb); 30141da177e4SLinus Torvalds 30156e08d5e3SYuchung Cheng if (tp->undo_retrans < 0) 30166e08d5e3SYuchung Cheng tp->undo_retrans = 0; 30176e08d5e3SYuchung Cheng tp->undo_retrans += tcp_skb_pcount(skb); 30181da177e4SLinus Torvalds return err; 30191da177e4SLinus Torvalds } 30201da177e4SLinus Torvalds 30211da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially 30221da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue 30231da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either 30241da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached. 
30251da177e4SLinus Torvalds  */
30261da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk)
30271da177e4SLinus Torvalds {
30286687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
3029b9f1f1ceSEric Dumazet 	struct sk_buff *skb, *rtx_head, *hole = NULL;
30301da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3031840a3cbeSYuchung Cheng 	u32 max_segs;
303261eb55f4SIlpo Järvinen 	int mib_idx;
30336a438bbeSStephen Hemminger 
303445e77d31SIlpo Järvinen 	if (!tp->packets_out)
303545e77d31SIlpo Järvinen 		return;
303645e77d31SIlpo Järvinen 
303775c119afSEric Dumazet 	rtx_head = tcp_rtx_queue_head(sk);
3038b9f1f1ceSEric Dumazet 	skb = tp->retransmit_skb_hint ?: rtx_head;
3039ed6e7268SNeal Cardwell 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
304075c119afSEric Dumazet 	skb_rbtree_walk_from(skb) {
3041dca0aaf8SEric Dumazet 		__u8 sacked;
304210d3be56SEric Dumazet 		int segs;
30431da177e4SLinus Torvalds 
3044218af599SEric Dumazet 		if (tcp_pacing_check(sk))
3045218af599SEric Dumazet 			break;
3046218af599SEric Dumazet 
30476a438bbeSStephen Hemminger 		/* we could do better than to assign each time */
304851456b29SIan Morris 		if (!hole)
30496a438bbeSStephen Hemminger 			tp->retransmit_skb_hint = skb;
30506a438bbeSStephen Hemminger 
305110d3be56SEric Dumazet 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
305210d3be56SEric Dumazet 		if (segs <= 0)
30531da177e4SLinus Torvalds 			return;
3054dca0aaf8SEric Dumazet 		sacked = TCP_SKB_CB(skb)->sacked;
3055a3d2e9f8SEric Dumazet 		/* In case tcp_shift_skb_data() has aggregated large skbs,
3056a3d2e9f8SEric Dumazet 		 * we need to make sure we do not send too big TSO packets
3057a3d2e9f8SEric Dumazet 		 */
3058a3d2e9f8SEric Dumazet 		segs = min_t(int, segs, max_segs);
30590e1c54c2SIlpo Järvinen 
3060840a3cbeSYuchung Cheng 		if (tp->retrans_out >= tp->lost_out) {
3061006f582cSIlpo Järvinen 			break;
30620e1c54c2SIlpo Järvinen 		} else if (!(sacked & TCPCB_LOST)) {
306351456b29SIan Morris 			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
30640e1c54c2SIlpo Järvinen 				hole = skb;
306561eb55f4SIlpo Järvinen 			continue;
30661da177e4SLinus Torvalds 
30670e1c54c2SIlpo Järvinen 		} else {
30680e1c54c2SIlpo Järvinen 			if (icsk->icsk_ca_state != TCP_CA_Loss)
30690e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPFASTRETRANS;
30700e1c54c2SIlpo Järvinen 			else
30710e1c54c2SIlpo Järvinen 				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
30720e1c54c2SIlpo Järvinen 		}
30730e1c54c2SIlpo Järvinen 
30740e1c54c2SIlpo Järvinen 		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
307561eb55f4SIlpo Järvinen 			continue;
307640b215e5SPavel Emelyanov 
3077f9616c35SEric Dumazet 		if (tcp_small_queue_check(sk, skb, 1))
3078f9616c35SEric Dumazet 			return;
3079f9616c35SEric Dumazet 
308010d3be56SEric Dumazet 		if (tcp_retransmit_skb(sk, skb, segs))
30811da177e4SLinus Torvalds 			return;
308224ab6becSYuchung Cheng 
3083de1d6578SYuchung Cheng 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
30841da177e4SLinus Torvalds 
3085684bad11SYuchung Cheng 		if (tcp_in_cwnd_reduction(sk))
3086a262f0cdSNandita Dukkipati 			tp->prr_out += tcp_skb_pcount(skb);
3087a262f0cdSNandita Dukkipati 
308875c119afSEric Dumazet 		if (skb == rtx_head &&
308957dde7f7SYuchung Cheng 		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
30903f80e08fSEric Dumazet 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
30913f421baaSArnaldo Carvalho de Melo 					     inet_csk(sk)->icsk_rto,
30923f80e08fSEric Dumazet 					     TCP_RTO_MAX,
30933f80e08fSEric Dumazet 					     skb);
30941da177e4SLinus Torvalds 	}
30951da177e4SLinus Torvalds }
30961da177e4SLinus Torvalds 
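/* Editor's sketch (illustration only, not kernel code): how the per-skb
 * retransmit budget in the loop above is derived, with made-up numbers.
 */
static int tcp_retransmit_budget_example(void)
{
	int snd_cwnd = 10;		/* congestion window, in segments */
	int packets_in_flight = 7;	/* sent, not yet acked or lost */
	int max_segs = 2;		/* TSO cap from tcp_tso_segs() */
	int segs = snd_cwnd - packets_in_flight;

	if (segs <= 0)
		return 0;		/* no cwnd budget left: stop */
	return segs < max_segs ? segs : max_segs;	/* 2 here */
}

3097d83769a5SEric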
Dumazet /* We allow exceeding memory limits for FIN packets to expedite
3098d83769a5SEric Dumazet  * connection tear down and (memory) recovery.
3099845704a5SEric Dumazet  * Otherwise tcp_send_fin() could be tempted to either delay FIN
3100845704a5SEric Dumazet  * or even be forced to close flow without any FIN.
3101a6c5ea4cSEric Dumazet  * In general, we want to allow one skb per socket to avoid hangs
3102a6c5ea4cSEric Dumazet  * with edge trigger epoll()
3103d83769a5SEric Dumazet  */
3104a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size)
3105d83769a5SEric Dumazet {
3106e805605cSJohannes Weiner 	int amt;
3107d83769a5SEric Dumazet 
3108d83769a5SEric Dumazet 	if (size <= sk->sk_forward_alloc)
3109d83769a5SEric Dumazet 		return;
3110d83769a5SEric Dumazet 	amt = sk_mem_pages(size);
3111d83769a5SEric Dumazet 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3112e805605cSJohannes Weiner 	sk_memory_allocated_add(sk, amt);
3113e805605cSJohannes Weiner 
3114baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3115baac50bbSJohannes Weiner 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3116d83769a5SEric Dumazet }
3117d83769a5SEric Dumazet 
3118845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us.
3119845704a5SEric Dumazet  * We should try to send a FIN packet really hard, but eventually give up.
31201da177e4SLinus Torvalds  */
31211da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk)
31221da177e4SLinus Torvalds {
3123845704a5SEric Dumazet 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
31241da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
31251da177e4SLinus Torvalds 
3126845704a5SEric Dumazet 	/* Optimization, tack on the FIN if we have one skb in write queue and
3127845704a5SEric Dumazet 	 * this skb was not yet sent, or we are under memory pressure.
3128845704a5SEric Dumazet 	 * Note: in the latter case, FIN packet will be sent after a timeout,
3129845704a5SEric Dumazet 	 * as TCP stack thinks it has already been transmitted.
31301da177e4SLinus Torvalds 	 */
313175c119afSEric Dumazet 	if (!tskb && tcp_under_memory_pressure(sk))
313275c119afSEric Dumazet 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
313375c119afSEric Dumazet 
313475c119afSEric Dumazet 	if (tskb) {
3135845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3136845704a5SEric Dumazet 		TCP_SKB_CB(tskb)->end_seq++;
31371da177e4SLinus Torvalds 		tp->write_seq++;
313875c119afSEric Dumazet 		if (tcp_write_queue_empty(sk)) {
3139845704a5SEric Dumazet 			/* This means tskb was already sent.
3140845704a5SEric Dumazet 			 * Pretend we included the FIN on previous transmit.
3141845704a5SEric Dumazet 			 * We need to set tp->snd_nxt to the value it would have
3142845704a5SEric Dumazet 			 * if FIN had been sent. This is because retransmit path
3143845704a5SEric Dumazet 			 * does not change tp->snd_nxt.
3144845704a5SEric Dumazet 			 */
3145845704a5SEric Dumazet 			tp->snd_nxt++;
3146845704a5SEric Dumazet 			return;
3147845704a5SEric Dumazet 		}
31481da177e4SLinus Torvalds 	} else {
3149845704a5SEric Dumazet 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3150d1edc085SColin Ian King 		if (unlikely(!skb))
3151845704a5SEric Dumazet 			return;
3152d1edc085SColin Ian King 
3153e2080072SEric Dumazet 		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3154d83769a5SEric Dumazet 		skb_reserve(skb, MAX_TCP_HEADER);
3155a6c5ea4cSEric Dumazet 		sk_forced_mem_schedule(sk, skb->truesize);
31561da177e4SLinus Torvalds 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb().
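		 * Editor's example: if write_seq was 1000, the FIN itself
		 * occupies sequence number 1000 and tcp_queue_skb()
		 * advances write_seq to 1001.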
*/ 3157e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tp->write_seq, 3158a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_FIN); 31591da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 31601da177e4SLinus Torvalds } 3161845704a5SEric Dumazet __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); 31621da177e4SLinus Torvalds } 31631da177e4SLinus Torvalds 31641da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 31651da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 31661da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 316765bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 31681da177e4SLinus Torvalds */ 3169dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 31701da177e4SLinus Torvalds { 31711da177e4SLinus Torvalds struct sk_buff *skb; 31721da177e4SLinus Torvalds 31737cc2b043SGao Feng TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 31747cc2b043SGao Feng 31751da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 31761da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 31771da177e4SLinus Torvalds if (!skb) { 31784e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 31791da177e4SLinus Torvalds return; 31801da177e4SLinus Torvalds } 31811da177e4SLinus Torvalds 31821da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 31831da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 3184e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 3185a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_RST); 31869a568de4SEric Dumazet tcp_mstamp_refresh(tcp_sk(sk)); 31871da177e4SLinus Torvalds /* Send it off. */ 3188dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 31894e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3190c24b14c4SSong Liu 3191c24b14c4SSong Liu /* skb of trace_tcp_send_reset() keeps the skb that caused RST, 3192c24b14c4SSong Liu * skb here is different to the troublesome skb, so use NULL 3193c24b14c4SSong Liu */ 3194c24b14c4SSong Liu trace_tcp_send_reset(sk, NULL); 31951da177e4SLinus Torvalds } 31961da177e4SLinus Torvalds 319767edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment. 319867edfef7SAndi Kleen * WARNING: This routine must only be called when we have already sent 31991da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 32001da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 32011da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 
32021da177e4SLinus Torvalds */ 32031da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 32041da177e4SLinus Torvalds { 32051da177e4SLinus Torvalds struct sk_buff *skb; 32061da177e4SLinus Torvalds 320775c119afSEric Dumazet skb = tcp_rtx_queue_head(sk); 320851456b29SIan Morris if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 320975c119afSEric Dumazet pr_err("%s: wrong queue state\n", __func__); 32101da177e4SLinus Torvalds return -EFAULT; 32111da177e4SLinus Torvalds } 32124de075e0SEric Dumazet if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 32131da177e4SLinus Torvalds if (skb_cloned(skb)) { 3214e2080072SEric Dumazet struct sk_buff *nskb; 3215e2080072SEric Dumazet 3216e2080072SEric Dumazet tcp_skb_tsorted_save(skb) { 3217e2080072SEric Dumazet nskb = skb_copy(skb, GFP_ATOMIC); 3218e2080072SEric Dumazet } tcp_skb_tsorted_restore(skb); 321951456b29SIan Morris if (!nskb) 32201da177e4SLinus Torvalds return -ENOMEM; 3221e2080072SEric Dumazet INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); 322275c119afSEric Dumazet tcp_rtx_queue_unlink_and_free(skb, sk); 3223f4a775d1SEric Dumazet __skb_header_release(nskb); 322475c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); 32253ab224beSHideo Aoki sk->sk_wmem_queued += nskb->truesize; 32263ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 32271da177e4SLinus Torvalds skb = nskb; 32281da177e4SLinus Torvalds } 32291da177e4SLinus Torvalds 32304de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 3231735d3831SFlorian Westphal tcp_ecn_send_synack(sk, skb); 32321da177e4SLinus Torvalds } 3233dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 32341da177e4SLinus Torvalds } 32351da177e4SLinus Torvalds 32364aea39c1SEric Dumazet /** 32374aea39c1SEric Dumazet * tcp_make_synack - Prepare a SYN-ACK. 32384aea39c1SEric Dumazet * sk: listener socket 32394aea39c1SEric Dumazet * dst: dst entry attached to the SYNACK 32404aea39c1SEric Dumazet * req: request_sock pointer 32414aea39c1SEric Dumazet * 32424aea39c1SEric Dumazet * Allocate one skb and build a SYNACK packet. 32434aea39c1SEric Dumazet * @dst is consumed : Caller should not use it again. 32444aea39c1SEric Dumazet */ 32455d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 3246e6b4d113SWilliam Allen Simpson struct request_sock *req, 3247ca6fb065SEric Dumazet struct tcp_fastopen_cookie *foc, 3248b3d05147SEric Dumazet enum tcp_synack_type synack_type) 32491da177e4SLinus Torvalds { 32502e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 32515d062de7SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 325280f03e27SEric Dumazet struct tcp_md5sig_key *md5 = NULL; 32535d062de7SEric Dumazet struct tcp_out_options opts; 32545d062de7SEric Dumazet struct sk_buff *skb; 3255bd0388aeSWilliam Allen Simpson int tcp_header_size; 32565d062de7SEric Dumazet struct tcphdr *th; 3257f5fff5dcSTom Quetchenbach int mss; 3258a842fe14SEric Dumazet u64 now; 32591da177e4SLinus Torvalds 3260ca6fb065SEric Dumazet skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 32614aea39c1SEric Dumazet if (unlikely(!skb)) { 32624aea39c1SEric Dumazet dst_release(dst); 32631da177e4SLinus Torvalds return NULL; 32644aea39c1SEric Dumazet } 32651da177e4SLinus Torvalds /* Reserve space for headers. 
	 */
32661da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
32671da177e4SLinus Torvalds 
3268b3d05147SEric Dumazet 	switch (synack_type) {
3269b3d05147SEric Dumazet 	case TCP_SYNACK_NORMAL:
32709e17f8a4SEric Dumazet 		skb_set_owner_w(skb, req_to_sk(req));
3271b3d05147SEric Dumazet 		break;
3272b3d05147SEric Dumazet 	case TCP_SYNACK_COOKIE:
3273b3d05147SEric Dumazet 		/* Under synflood, we do not attach skb to a socket,
3274b3d05147SEric Dumazet 		 * to avoid false sharing.
3275b3d05147SEric Dumazet 		 */
3276b3d05147SEric Dumazet 		break;
3277b3d05147SEric Dumazet 	case TCP_SYNACK_FASTOPEN:
3278ca6fb065SEric Dumazet 		/* sk is a const pointer, because multiple CPUs might
3279ca6fb065SEric Dumazet 		 * call us concurrently.
3280ca6fb065SEric Dumazet 		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3281ca6fb065SEric Dumazet 		 */
3282ca6fb065SEric Dumazet 		skb_set_owner_w(skb, (struct sock *)sk);
3283b3d05147SEric Dumazet 		break;
3284ca6fb065SEric Dumazet 	}
32854aea39c1SEric Dumazet 	skb_dst_set(skb, dst);
32861da177e4SLinus Torvalds 
32873541f9e8SEric Dumazet 	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3288f5fff5dcSTom Quetchenbach 
328933ad798cSAdam Langley 	memset(&opts, 0, sizeof(opts));
3290a842fe14SEric Dumazet 	now = tcp_clock_ns();
32918b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES
32928b5f12d0SFlorian Westphal 	if (unlikely(req->cookie_ts))
3293d3edd06eSEric Dumazet 		skb->skb_mstamp_ns = cookie_init_timestamp(req);
32948b5f12d0SFlorian Westphal 	else
32958b5f12d0SFlorian Westphal #endif
32969e450c1eSYuchung Cheng 	{
3297a842fe14SEric Dumazet 		skb->skb_mstamp_ns = now;
32989e450c1eSYuchung Cheng 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
32999e450c1eSYuchung Cheng 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
33009e450c1eSYuchung Cheng 	}
330180f03e27SEric Dumazet 
330280f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG
330380f03e27SEric Dumazet 	rcu_read_lock();
3304fd3a154aSEric Dumazet 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
330580f03e27SEric Dumazet #endif
330658d607d3SEric Dumazet 	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
330760e2a778SUrsula Braun 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
330860e2a778SUrsula Braun 					     foc) + sizeof(*th);
330933ad798cSAdam Langley 
3310aa8223c7SArnaldo Carvalho de Melo 	skb_push(skb, tcp_header_size);
3311aa8223c7SArnaldo Carvalho de Melo 	skb_reset_transport_header(skb);
33121da177e4SLinus Torvalds 
3313ea1627c2SEric Dumazet 	th = (struct tcphdr *)skb->data;
33141da177e4SLinus Torvalds 	memset(th, 0, sizeof(struct tcphdr));
33151da177e4SLinus Torvalds 	th->syn = 1;
33161da177e4SLinus Torvalds 	th->ack = 1;
33176ac705b1SEric Dumazet 	tcp_ecn_make_synack(req, th);
3318b44084c2SEric Dumazet 	th->source = htons(ireq->ir_num);
3319634fb979SEric Dumazet 	th->dest = ireq->ir_rmt_port;
3320e05a90ecSJamal Hadi Salim 	skb->mark = ireq->ir_mark;
33213b117750SEric Dumazet 	skb->ip_summed = CHECKSUM_PARTIAL;
33223b117750SEric Dumazet 	th->seq = htonl(tcp_rsk(req)->snt_isn);
33238336886fSJerry Chu 	/* XXX data is queued and acked as is. No buffer/window check */
33248336886fSJerry Chu 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
33251da177e4SLinus Torvalds 
33261da177e4SLinus Torvalds 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled.
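	 * For example (illustrative value), a 180000 byte rsk_rcv_wnd is
	 * clamped to 65535 below; the peer can apply the wscale factor to
	 * our advertisements only once the handshake has completed.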
	 */
3327ed53d0abSEric Dumazet 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
33285d062de7SEric Dumazet 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
33291da177e4SLinus Torvalds 	th->doff = (tcp_header_size >> 2);
333090bbcc60SEric Dumazet 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3331cfb6eeb4SYOSHIFUJI Hideaki 
3332cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3333cfb6eeb4SYOSHIFUJI Hideaki 	/* Okay, we have all we need - do the md5 hash if needed */
333480f03e27SEric Dumazet 	if (md5)
3335bd0388aeSWilliam Allen Simpson 		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
333639f8e58eSEric Dumazet 							 md5, req_to_sk(req), skb);
333780f03e27SEric Dumazet 	rcu_read_unlock();
3338cfb6eeb4SYOSHIFUJI Hideaki #endif
3339cfb6eeb4SYOSHIFUJI Hideaki 
3340a842fe14SEric Dumazet 	skb->skb_mstamp_ns = now;
3341a842fe14SEric Dumazet 	tcp_add_tx_delay(skb, tp);
3342a842fe14SEric Dumazet 
33431da177e4SLinus Torvalds 	return skb;
33441da177e4SLinus Torvalds }
33454bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack);
33461da177e4SLinus Torvalds 
334781164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
334881164413SDaniel Borkmann {
334981164413SDaniel Borkmann 	struct inet_connection_sock *icsk = inet_csk(sk);
335081164413SDaniel Borkmann 	const struct tcp_congestion_ops *ca;
335181164413SDaniel Borkmann 	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
335281164413SDaniel Borkmann 
335381164413SDaniel Borkmann 	if (ca_key == TCP_CA_UNSPEC)
335481164413SDaniel Borkmann 		return;
335581164413SDaniel Borkmann 
335681164413SDaniel Borkmann 	rcu_read_lock();
335781164413SDaniel Borkmann 	ca = tcp_ca_find_key(ca_key);
335881164413SDaniel Borkmann 	if (likely(ca && try_module_get(ca->owner))) {
335981164413SDaniel Borkmann 		module_put(icsk->icsk_ca_ops->owner);
336081164413SDaniel Borkmann 		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
336181164413SDaniel Borkmann 		icsk->icsk_ca_ops = ca;
336281164413SDaniel Borkmann 	}
336381164413SDaniel Borkmann 	rcu_read_unlock();
336481164413SDaniel Borkmann }
336581164413SDaniel Borkmann 
336667edfef7SAndi Kleen /* Do all connect socket setups that can be done AF-independently. */
3367f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk)
33681da177e4SLinus Torvalds {
3369cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
33701da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
33711da177e4SLinus Torvalds 	__u8 rcv_wscale;
337213d3b1ebSLawrence Brakmo 	u32 rcv_wnd;
33731da177e4SLinus Torvalds 
33741da177e4SLinus Torvalds 	/* We'll fix this up when we get a response from the other end.
33751da177e4SLinus Torvalds 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
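	 * (Assuming timestamps are enabled, the setup below gives
	 * 20 + 12 = 32 bytes of TCP header: sizeof(struct tcphdr)
	 * plus TCPOLEN_TSTAMP_ALIGNED.)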
33761da177e4SLinus Torvalds 	 */
33775d2ed052SEric Dumazet 	tp->tcp_header_len = sizeof(struct tcphdr);
33785d2ed052SEric Dumazet 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
33795d2ed052SEric Dumazet 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
33801da177e4SLinus Torvalds 
3381cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
338200db4124SIan Morris 	if (tp->af_specific->md5_lookup(sk, sk))
3383cfb6eeb4SYOSHIFUJI Hideaki 		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3384cfb6eeb4SYOSHIFUJI Hideaki #endif
3385cfb6eeb4SYOSHIFUJI Hideaki 
33861da177e4SLinus Torvalds 	/* If the user gave us TCP_MAXSEG, record it to clamp */
33871da177e4SLinus Torvalds 	if (tp->rx_opt.user_mss)
33881da177e4SLinus Torvalds 		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
33891da177e4SLinus Torvalds 	tp->max_window = 0;
33905d424d5aSJohn Heffner 	tcp_mtup_init(sk);
33911da177e4SLinus Torvalds 	tcp_sync_mss(sk, dst_mtu(dst));
33921da177e4SLinus Torvalds 
339381164413SDaniel Borkmann 	tcp_ca_dst_init(sk, dst);
339481164413SDaniel Borkmann 
33951da177e4SLinus Torvalds 	if (!tp->window_clamp)
33961da177e4SLinus Torvalds 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
33973541f9e8SEric Dumazet 	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3398f5fff5dcSTom Quetchenbach 
33991da177e4SLinus Torvalds 	tcp_initialize_rcv_mss(sk);
34001da177e4SLinus Torvalds 
3401e88c64f0SHagen Paul Pfeifer 	/* limit the window selection if the user enforces a smaller rx buffer */
3402e88c64f0SHagen Paul Pfeifer 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3403e88c64f0SHagen Paul Pfeifer 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3404e88c64f0SHagen Paul Pfeifer 		tp->window_clamp = tcp_full_space(sk);
3405e88c64f0SHagen Paul Pfeifer 
340613d3b1ebSLawrence Brakmo 	rcv_wnd = tcp_rwnd_init_bpf(sk);
340713d3b1ebSLawrence Brakmo 	if (rcv_wnd == 0)
340813d3b1ebSLawrence Brakmo 		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
340913d3b1ebSLawrence Brakmo 
3410ceef9ab6SEric Dumazet 	tcp_select_initial_window(sk, tcp_full_space(sk),
34111da177e4SLinus Torvalds 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
34121da177e4SLinus Torvalds 				  &tp->rcv_wnd,
34131da177e4SLinus Torvalds 				  &tp->window_clamp,
34149bb37ef0SEric Dumazet 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
341531d12926Slaurent chavey 				  &rcv_wscale,
341613d3b1ebSLawrence Brakmo 				  rcv_wnd);
34171da177e4SLinus Torvalds 
34181da177e4SLinus Torvalds 	tp->rx_opt.rcv_wscale = rcv_wscale;
34191da177e4SLinus Torvalds 	tp->rcv_ssthresh = tp->rcv_wnd;
34201da177e4SLinus Torvalds 
34211da177e4SLinus Torvalds 	sk->sk_err = 0;
34221da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
34231da177e4SLinus Torvalds 	tp->snd_wnd = 0;
3424ee7537b6SHantzis Fotis 	tcp_init_wl(tp, 0);
34257f582b24SEric Dumazet 	tcp_write_queue_purge(sk);
34261da177e4SLinus Torvalds 	tp->snd_una = tp->write_seq;
34271da177e4SLinus Torvalds 	tp->snd_sml = tp->write_seq;
342833f5f57eSIlpo Järvinen 	tp->snd_up = tp->write_seq;
3429370816aeSPavel Emelyanov 	tp->snd_nxt = tp->write_seq;
3430ee995283SPavel Emelyanov 
3431ee995283SPavel Emelyanov 	if (likely(!tp->repair))
34321da177e4SLinus Torvalds 		tp->rcv_nxt = 0;
3433c7781a6eSAndrew Vagin 	else
343470eabf0eSEric Dumazet 		tp->rcv_tstamp = tcp_jiffies32;
3435ee995283SPavel Emelyanov 	tp->rcv_wup = tp->rcv_nxt;
34367db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
34371da177e4SLinus Torvalds 
34388550f328SLawrence Brakmo 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3439463c84b9SArnaldo Carvalho de Melo 	inet_csk(sk)->icsk_retransmits = 0;
34401da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
34411da177e4SLinus Torvalds }
34421da177e4SLinus Torvalds 
3443783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3444783237e8SYuchung Cheng {
3445783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3446783237e8SYuchung Cheng 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3447783237e8SYuchung Cheng 
3448783237e8SYuchung Cheng 	tcb->end_seq += skb->len;
3449f4a775d1SEric Dumazet 	__skb_header_release(skb);
3450783237e8SYuchung Cheng 	sk->sk_wmem_queued += skb->truesize;
3451783237e8SYuchung Cheng 	sk_mem_charge(sk, skb->truesize);
3452*0f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3453783237e8SYuchung Cheng 	tp->packets_out += tcp_skb_pcount(skb);
3454783237e8SYuchung Cheng }
3455783237e8SYuchung Cheng 
3456783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However,
3457783237e8SYuchung Cheng  * queue a data-only packet after the regular SYN, such that regular SYNs
3458783237e8SYuchung Cheng  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3459783237e8SYuchung Cheng  * only the SYN sequence, the data are retransmitted in the first ACK.
3460783237e8SYuchung Cheng  * If the cookie is not cached or another error occurs, fall back to
3461783237e8SYuchung Cheng  * sending a regular SYN with a Fast Open cookie request option.
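 *
 * Worked example with assumed numbers: for an IPv4 path MTU of 1500,
 * __tcp_mtu_to_mss() yields roughly 1460 bytes; after reserving
 * MAX_TCP_OPTION_SPACE (40 bytes) about 1420 bytes remain for SYN data,
 * further clamped by fo->size and SKB_MAX_HEAD(MAX_TCP_HEADER) below.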
3462783237e8SYuchung Cheng  */
3463783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3464783237e8SYuchung Cheng {
3465783237e8SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
3466783237e8SYuchung Cheng 	struct tcp_fastopen_request *fo = tp->fastopen_req;
3467065263f4SWei Wang 	int space, err = 0;
3468355a901eSEric Dumazet 	struct sk_buff *syn_data;
3469783237e8SYuchung Cheng 
347067da22d2SYuchung Cheng 	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3471065263f4SWei Wang 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3472783237e8SYuchung Cheng 		goto fallback;
3473783237e8SYuchung Cheng 
3474783237e8SYuchung Cheng 	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3475783237e8SYuchung Cheng 	 * user-MSS. Reserve maximum option space for middleboxes that add
3476783237e8SYuchung Cheng 	 * private TCP options. The cost is reduced data space in SYN :(
3477783237e8SYuchung Cheng 	 */
34783541f9e8SEric Dumazet 	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
34793541f9e8SEric Dumazet 
34801b63edd6SYuchung Cheng 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3481783237e8SYuchung Cheng 		MAX_TCP_OPTION_SPACE;
3482783237e8SYuchung Cheng 
3483f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, fo->size);
3484f5ddcbbbSEric Dumazet 
3485f5ddcbbbSEric Dumazet 	/* limit to order-0 allocations */
3486f5ddcbbbSEric Dumazet 	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3487f5ddcbbbSEric Dumazet 
3488eb934478SEric Dumazet 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3489355a901eSEric Dumazet 	if (!syn_data)
3490783237e8SYuchung Cheng 		goto fallback;
3491355a901eSEric Dumazet 	syn_data->ip_summed = CHECKSUM_PARTIAL;
3492355a901eSEric Dumazet 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
349307e100f9SEric Dumazet 	if (space) {
349407e100f9SEric Dumazet 		int copied = copy_from_iter(skb_put(syn_data, space), space,
349557be5bdaSAl Viro 					    &fo->data->msg_iter);
349657be5bdaSAl Viro 		if (unlikely(!copied)) {
3497ba233b34SEric Dumazet 			tcp_skb_tsorted_anchor_cleanup(syn_data);
3498355a901eSEric Dumazet 			kfree_skb(syn_data);
3499783237e8SYuchung Cheng 			goto fallback;
3500783237e8SYuchung Cheng 		}
350157be5bdaSAl Viro 		if (copied != space) {
350257be5bdaSAl Viro 			skb_trim(syn_data, copied);
350357be5bdaSAl Viro 			space = copied;
350457be5bdaSAl Viro 		}
3505f859a448SWillem de Bruijn 		skb_zcopy_set(syn_data, fo->uarg, NULL);
350607e100f9SEric Dumazet 	}
3507355a901eSEric Dumazet 	/* No more data pending in inet_wait_for_connect() */
3508355a901eSEric Dumazet 	if (space == fo->size)
3509355a901eSEric Dumazet 		fo->data = NULL;
3510355a901eSEric Dumazet 	fo->copied = space;
3511783237e8SYuchung Cheng 
3512355a901eSEric Dumazet 	tcp_connect_queue_skb(sk, syn_data);
35130f87230dSFrancis Yan 	if (syn_data->len)
35140f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3515355a901eSEric Dumazet 
3516355a901eSEric Dumazet 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3517355a901eSEric Dumazet 
3518d3edd06eSEric Dumazet 	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3519355a901eSEric Dumazet 
3520355a901eSEric Dumazet 	/* Now the full SYN+DATA has been cloned and sent (or not);
3521355a901eSEric Dumazet 	 * remove the SYN from the original skb (syn_data) that we
3522355a901eSEric Dumazet 	 * keep in the write queue in case of a retransmit, as we
3523355a901eSEric Dumazet 	 * also have the SYN packet (with no data) in the same queue.
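	 * For instance, if the SYN occupies sequence number x and carries
	 * len bytes of data, syn_data initially covers [x, x + 1 + len);
	 * after the seq++ below it covers only the payload [x + 1, x + 1 + len).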
3524431a9124SEric Dumazet 	 */
3525355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->seq++;
3526355a901eSEric Dumazet 	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3527355a901eSEric Dumazet 	if (!err) {
352867da22d2SYuchung Cheng 		tp->syn_data = (fo->copied > 0);
352975c119afSEric Dumazet 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3530f19c29e3SYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3531783237e8SYuchung Cheng 		goto done;
3532783237e8SYuchung Cheng 	}
3533783237e8SYuchung Cheng 
353475c119afSEric Dumazet 	/* data was not sent, put it in write_queue */
353575c119afSEric Dumazet 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3536b5b7db8dSEric Dumazet 	tp->packets_out -= tcp_skb_pcount(syn_data);
3537b5b7db8dSEric Dumazet 
3538783237e8SYuchung Cheng fallback:
3539783237e8SYuchung Cheng 	/* Send a regular SYN with Fast Open cookie request option */
3540783237e8SYuchung Cheng 	if (fo->cookie.len > 0)
3541783237e8SYuchung Cheng 		fo->cookie.len = 0;
3542783237e8SYuchung Cheng 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3543783237e8SYuchung Cheng 	if (err)
3544783237e8SYuchung Cheng 		tp->syn_fastopen = 0;
3545783237e8SYuchung Cheng done:
3546783237e8SYuchung Cheng 	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3547783237e8SYuchung Cheng 	return err;
3548783237e8SYuchung Cheng }
3549783237e8SYuchung Cheng 
355067edfef7SAndi Kleen /* Build a SYN and send it off. */
35511da177e4SLinus Torvalds int tcp_connect(struct sock *sk)
35521da177e4SLinus Torvalds {
35531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
35541da177e4SLinus Torvalds 	struct sk_buff *buff;
3555ee586811SEric Paris 	int err;
35561da177e4SLinus Torvalds 
3557de525be2SLawrence Brakmo 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
35588ba60924SEric Dumazet 
35598ba60924SEric Dumazet 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
35608ba60924SEric Dumazet 		return -EHOSTUNREACH; /* Routing failure or similar. */
35618ba60924SEric Dumazet 
35621da177e4SLinus Torvalds 	tcp_connect_init(sk);
35631da177e4SLinus Torvalds 
35642b916477SAndrey Vagin 	if (unlikely(tp->repair)) {
35652b916477SAndrey Vagin 		tcp_finish_connect(sk, NULL);
35662b916477SAndrey Vagin 		return 0;
35672b916477SAndrey Vagin 	}
35682b916477SAndrey Vagin 
3569eb934478SEric Dumazet 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3570355a901eSEric Dumazet 	if (unlikely(!buff))
35711da177e4SLinus Torvalds 		return -ENOBUFS;
35721da177e4SLinus Torvalds 
3573a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
35749a568de4SEric Dumazet 	tcp_mstamp_refresh(tp);
35759a568de4SEric Dumazet 	tp->retrans_stamp = tcp_time_stamp(tp);
3576783237e8SYuchung Cheng 	tcp_connect_queue_skb(sk, buff);
3577735d3831SFlorian Westphal 	tcp_ecn_send_syn(sk, buff);
357875c119afSEric Dumazet 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
35791da177e4SLinus Torvalds 
3580783237e8SYuchung Cheng 	/* Send off SYN; include data in Fast Open. */
3581783237e8SYuchung Cheng 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3582783237e8SYuchung Cheng 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3583ee586811SEric Paris 	if (err == -ECONNREFUSED)
3584ee586811SEric Paris 		return err;
3585bd37a088SWei Yongjun 
3586bd37a088SWei Yongjun 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3587bd37a088SWei Yongjun 	 * in order to make this packet get counted in tcpOutSegs.
3588bd37a088SWei Yongjun 	 */
3589bd37a088SWei Yongjun 	tp->snd_nxt = tp->write_seq;
3590bd37a088SWei Yongjun 	tp->pushed_seq = tp->write_seq;
3591b5b7db8dSEric Dumazet 	buff = tcp_send_head(sk);
3592b5b7db8dSEric Dumazet 	if (unlikely(buff)) {
3593b5b7db8dSEric Dumazet 		tp->snd_nxt = TCP_SKB_CB(buff)->seq;
3594b5b7db8dSEric Dumazet 		tp->pushed_seq = TCP_SKB_CB(buff)->seq;
3595b5b7db8dSEric Dumazet 	}
359681cc8a75SPavel Emelyanov 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
35971da177e4SLinus Torvalds 
35981da177e4SLinus Torvalds 	/* Timer for repeating the SYN until an answer. */
35993f421baaSArnaldo Carvalho de Melo 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
36003f421baaSArnaldo Carvalho de Melo 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
36011da177e4SLinus Torvalds 	return 0;
36021da177e4SLinus Torvalds }
36034bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect);
36041da177e4SLinus Torvalds 
36051da177e4SLinus Torvalds /* Send out a delayed ack; the caller does the policy checking
36061da177e4SLinus Torvalds  * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
36071da177e4SLinus Torvalds  * for details.
36081da177e4SLinus Torvalds  */
36091da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk)
36101da177e4SLinus Torvalds {
3611463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
3612463c84b9SArnaldo Carvalho de Melo 	int ato = icsk->icsk_ack.ato;
36131da177e4SLinus Torvalds 	unsigned long timeout;
36141da177e4SLinus Torvalds 
36151da177e4SLinus Torvalds 	if (ato > TCP_DELACK_MIN) {
3616463c84b9SArnaldo Carvalho de Melo 		const struct tcp_sock *tp = tcp_sk(sk);
36171da177e4SLinus Torvalds 		int max_ato = HZ / 2;
36181da177e4SLinus Torvalds 
361931954cd8SWei Wang 		if (inet_csk_in_pingpong_mode(sk) ||
3620056834d9SIlpo Järvinen 		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
36211da177e4SLinus Torvalds 			max_ato = TCP_DELACK_MAX;
36221da177e4SLinus Torvalds 
36231da177e4SLinus Torvalds 		/* Slow path, intersegment interval is "high". */
36241da177e4SLinus Torvalds 
36251da177e4SLinus Torvalds 		/* If some rtt estimate is known, use it to bound delayed ack.
3626463c84b9SArnaldo Carvalho de Melo 		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
36271da177e4SLinus Torvalds 		 * directly.
36281da177e4SLinus Torvalds 		 */
3629740b0f18SEric Dumazet 		if (tp->srtt_us) {
3630740b0f18SEric Dumazet 			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3631740b0f18SEric Dumazet 					TCP_DELACK_MIN);
36321da177e4SLinus Torvalds 
36331da177e4SLinus Torvalds 			if (rtt < max_ato)
36341da177e4SLinus Torvalds 				max_ato = rtt;
36351da177e4SLinus Torvalds 		}
36361da177e4SLinus Torvalds 
36371da177e4SLinus Torvalds 		ato = min(ato, max_ato);
36381da177e4SLinus Torvalds 	}
36391da177e4SLinus Torvalds 
36401da177e4SLinus Torvalds 	/* Stay within the limit we were given */
36411da177e4SLinus Torvalds 	timeout = jiffies + ato;
36421da177e4SLinus Torvalds 
36431da177e4SLinus Torvalds 	/* Use the new timeout only if there wasn't an older one earlier. */
3644463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
36451da177e4SLinus Torvalds 		/* If delack timer was blocked or is about to expire,
36461da177e4SLinus Torvalds 		 * send ACK now.
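		 * (e.g. with ato = 40 ms, a timer already due within
		 * ato/4 = 10 ms makes an immediate ACK cheaper than
		 * rescheduling; that is the jiffies + (ato >> 2) test below)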
36471da177e4SLinus Torvalds 		 */
3648463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
3649463c84b9SArnaldo Carvalho de Melo 		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
36501da177e4SLinus Torvalds 			tcp_send_ack(sk);
36511da177e4SLinus Torvalds 			return;
36521da177e4SLinus Torvalds 		}
36531da177e4SLinus Torvalds 
3654463c84b9SArnaldo Carvalho de Melo 		if (!time_before(timeout, icsk->icsk_ack.timeout))
3655463c84b9SArnaldo Carvalho de Melo 			timeout = icsk->icsk_ack.timeout;
36561da177e4SLinus Torvalds 	}
3657463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3658463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_ack.timeout = timeout;
3659463c84b9SArnaldo Carvalho de Melo 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
36601da177e4SLinus Torvalds }
36611da177e4SLinus Torvalds 
36621da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */
36632987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
36641da177e4SLinus Torvalds {
36651da177e4SLinus Torvalds 	struct sk_buff *buff;
36661da177e4SLinus Torvalds 
3667058dc334SIlpo Järvinen 	/* If we have been reset, we may not send again. */
3668058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3669058dc334SIlpo Järvinen 		return;
3670058dc334SIlpo Järvinen 
36711da177e4SLinus Torvalds 	/* We are not putting this on the write queue, so
36721da177e4SLinus Torvalds 	 * tcp_transmit_skb() will set the ownership to this
36731da177e4SLinus Torvalds 	 * sock.
36741da177e4SLinus Torvalds 	 */
36757450aaf6SEric Dumazet 	buff = alloc_skb(MAX_TCP_HEADER,
36767450aaf6SEric Dumazet 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
36777450aaf6SEric Dumazet 	if (unlikely(!buff)) {
3678463c84b9SArnaldo Carvalho de Melo 		inet_csk_schedule_ack(sk);
3679463c84b9SArnaldo Carvalho de Melo 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
36803f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
36813f421baaSArnaldo Carvalho de Melo 					  TCP_DELACK_MAX, TCP_RTO_MAX);
36821da177e4SLinus Torvalds 		return;
36831da177e4SLinus Torvalds 	}
36841da177e4SLinus Torvalds 
36851da177e4SLinus Torvalds 	/* Reserve space for headers and prepare control bits. */
36861da177e4SLinus Torvalds 	skb_reserve(buff, MAX_TCP_HEADER);
3687a3433f35SChangli Gao 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
36881da177e4SLinus Torvalds 
368998781965SEric Dumazet 	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
369098781965SEric Dumazet 	 * too much.
369198781965SEric Dumazet 	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
369298781965SEric Dumazet 	 */
369398781965SEric Dumazet 	skb_set_tcp_pure_ack(buff);
369498781965SEric Dumazet 
36951da177e4SLinus Torvalds 	/* Send it off, this clears delayed acks for us. */
36962987babbSYuchung Cheng 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
36971da177e4SLinus Torvalds }
369827cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack);
36992987babbSYuchung Cheng 
37002987babbSYuchung Cheng void tcp_send_ack(struct sock *sk)
37012987babbSYuchung Cheng {
37022987babbSYuchung Cheng 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
37031da177e4SLinus Torvalds }
37041da177e4SLinus Torvalds 
37051da177e4SLinus Torvalds /* This routine sends a packet with an out-of-date sequence
37061da177e4SLinus Torvalds  * number. It assumes the other end will try to ack it.
37071da177e4SLinus Torvalds  *
37081da177e4SLinus Torvalds  * Question: what should we do in urgent mode?
37091da177e4SLinus Torvalds  * 4.4BSD forces sending single byte of data. We cannot send
37101da177e4SLinus Torvalds  * out of window data, because we have SND.NXT==SND.MAX...
37111da177e4SLinus Torvalds  *
37121da177e4SLinus Torvalds  * Current solution: to send TWO zero-length segments in urgent mode:
37131da177e4SLinus Torvalds  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
37141da177e4SLinus Torvalds  * out-of-date with SND.UNA-1 to probe window.
37151da177e4SLinus Torvalds  */
3716e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
37171da177e4SLinus Torvalds {
37181da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37191da177e4SLinus Torvalds 	struct sk_buff *skb;
37201da177e4SLinus Torvalds 
37211da177e4SLinus Torvalds 	/* We don't queue it, tcp_transmit_skb() sets ownership. */
37227450aaf6SEric Dumazet 	skb = alloc_skb(MAX_TCP_HEADER,
37237450aaf6SEric Dumazet 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
372451456b29SIan Morris 	if (!skb)
37251da177e4SLinus Torvalds 		return -1;
37261da177e4SLinus Torvalds 
37271da177e4SLinus Torvalds 	/* Reserve space for headers and set control bits. */
37281da177e4SLinus Torvalds 	skb_reserve(skb, MAX_TCP_HEADER);
37291da177e4SLinus Torvalds 	/* Use a previous sequence.  This should cause the other
37301da177e4SLinus Torvalds 	 * end to send an ack.  Don't queue or clone SKB, just
37311da177e4SLinus Torvalds 	 * send it.
37321da177e4SLinus Torvalds 	 */
3733a3433f35SChangli Gao 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3734e2e8009fSRenato Westphal 	NET_INC_STATS(sock_net(sk), mib);
37357450aaf6SEric Dumazet 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
37361da177e4SLinus Torvalds }
37371da177e4SLinus Torvalds 
3738385e2070SEric Dumazet /* Called from setsockopt( ... TCP_REPAIR ) */
3739ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk)
3740ee995283SPavel Emelyanov {
3741ee995283SPavel Emelyanov 	if (sk->sk_state == TCP_ESTABLISHED) {
3742ee995283SPavel Emelyanov 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
37439a568de4SEric Dumazet 		tcp_mstamp_refresh(tcp_sk(sk));
3744e520af48SEric Dumazet 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3745ee995283SPavel Emelyanov 	}
3746ee995283SPavel Emelyanov }
3747ee995283SPavel Emelyanov 
374867edfef7SAndi Kleen /* Initiate keepalive or window probe from timer.
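 * Roughly: if some queued data still fits into the peer's window,
 * (re)send the head segment, fragmenting it down to the window or MSS
 * if needed; otherwise fall back to a zero-length probe built by
 * tcp_xmit_probe_skb().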
 */
3749e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib)
37501da177e4SLinus Torvalds {
37511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
37521da177e4SLinus Torvalds 	struct sk_buff *skb;
37531da177e4SLinus Torvalds 
3754058dc334SIlpo Järvinen 	if (sk->sk_state == TCP_CLOSE)
3755058dc334SIlpo Järvinen 		return -1;
3756058dc334SIlpo Järvinen 
375700db4124SIan Morris 	skb = tcp_send_head(sk);
375800db4124SIan Morris 	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
37591da177e4SLinus Torvalds 		int err;
37600c54b85fSIlpo Järvinen 		unsigned int mss = tcp_current_mss(sk);
376190840defSIlpo Järvinen 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
37621da177e4SLinus Torvalds 
37631da177e4SLinus Torvalds 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
37641da177e4SLinus Torvalds 			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
37651da177e4SLinus Torvalds 
37661da177e4SLinus Torvalds 		/* We are probing the opening of a window,
37671da177e4SLinus Torvalds 		 * but the window size is != 0;
37681da177e4SLinus Torvalds 		 * this must be the result of SWS avoidance (sender side).
37691da177e4SLinus Torvalds 		 */
37701da177e4SLinus Torvalds 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
37711da177e4SLinus Torvalds 		    skb->len > mss) {
37721da177e4SLinus Torvalds 			seg_size = min(seg_size, mss);
37734de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
377475c119afSEric Dumazet 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
377575c119afSEric Dumazet 					 skb, seg_size, mss, GFP_ATOMIC))
37761da177e4SLinus Torvalds 				return -1;
37771da177e4SLinus Torvalds 		} else if (!tcp_skb_pcount(skb))
37785bbb432cSEric Dumazet 			tcp_set_skb_tso_segs(skb, mss);
37791da177e4SLinus Torvalds 
37804de075e0SEric Dumazet 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3781dfb4b9dcSDavid S. Miller 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
378266f5fe62SIlpo Järvinen 		if (!err)
378366f5fe62SIlpo Järvinen 			tcp_event_new_data_sent(sk, skb);
37841da177e4SLinus Torvalds 		return err;
37851da177e4SLinus Torvalds 	} else {
378633f5f57eSIlpo Järvinen 		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3787e520af48SEric Dumazet 			tcp_xmit_probe_skb(sk, 1, mib);
3788e520af48SEric Dumazet 		return tcp_xmit_probe_skb(sk, 0, mib);
37891da177e4SLinus Torvalds 	}
37901da177e4SLinus Torvalds }
37911da177e4SLinus Torvalds 
37921da177e4SLinus Torvalds /* A window probe timeout has occurred. If the window is not closed,
37931da177e4SLinus Torvalds  * send a partial packet, else a zero probe.
37941da177e4SLinus Torvalds  */
37951da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk)
37961da177e4SLinus Torvalds {
3797463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
37981da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3799c6214a97SNikolay Borisov 	struct net *net = sock_net(sk);
3800c1d5674fSYuchung Cheng 	unsigned long timeout;
38011da177e4SLinus Torvalds 	int err;
38021da177e4SLinus Torvalds 
3803e520af48SEric Dumazet 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
38041da177e4SLinus Torvalds 
380575c119afSEric Dumazet 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
38061da177e4SLinus Torvalds 		/* Cancel probe timer, if it is not required.
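		 * (Either packets are already in flight, so the
		 * retransmit timer is responsible, or nothing is
		 * queued to probe with.)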
		 */
38076687e988SArnaldo Carvalho de Melo 		icsk->icsk_probes_out = 0;
3808463c84b9SArnaldo Carvalho de Melo 		icsk->icsk_backoff = 0;
38091da177e4SLinus Torvalds 		return;
38101da177e4SLinus Torvalds 	}
38111da177e4SLinus Torvalds 
3812c1d5674fSYuchung Cheng 	icsk->icsk_probes_out++;
38131da177e4SLinus Torvalds 	if (err <= 0) {
3814c6214a97SNikolay Borisov 		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3815463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_backoff++;
3816c1d5674fSYuchung Cheng 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
38171da177e4SLinus Torvalds 	} else {
38181da177e4SLinus Torvalds 		/* If the packet was not sent due to local congestion,
3819c1d5674fSYuchung Cheng 		 * let senders fight for local resources conservatively.
38201da177e4SLinus Torvalds 		 */
3821c1d5674fSYuchung Cheng 		timeout = TCP_RESOURCE_PROBE_INTERVAL;
38221da177e4SLinus Torvalds 	}
3823c1d5674fSYuchung Cheng 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
38241da177e4SLinus Torvalds }
38255db92c99SOctavian Purdila 
3826ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
38275db92c99SOctavian Purdila {
38285db92c99SOctavian Purdila 	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
38295db92c99SOctavian Purdila 	struct flowi fl;
38305db92c99SOctavian Purdila 	int res;
38315db92c99SOctavian Purdila 
383258d607d3SEric Dumazet 	tcp_rsk(req)->txhash = net_tx_rndhash();
3833b3d05147SEric Dumazet 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
38345db92c99SOctavian Purdila 	if (!res) {
383590bbcc60SEric Dumazet 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
383602a1d6e7SEric Dumazet 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
38377e32b443SYuchung Cheng 		if (unlikely(tcp_passive_fastopen(sk)))
38387e32b443SYuchung Cheng 			tcp_sk(sk)->total_retrans++;
3839cf34ce3dSSong Liu 		trace_tcp_retransmit_synack(sk, req);
38405db92c99SOctavian Purdila 	}
38415db92c99SOctavian Purdila 	return res;
38425db92c99SOctavian Purdila }
38435db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack);
3844