// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>
#include <net/mptcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network.
 */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	if (tp->highest_sack == NULL)
		tp->highest_sack = skb;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
	tcp_check_space(sk);
}

/* SND.NXT, if window was not shrunk or the amount shrunk was less than one
 * window scaling factor due to loss of precision.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
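/* For illustration of the precision slack above: with rcv_wscale == 7,
 * one "window scaling factor" is 1 << 7 == 128, so a snd_nxt up to
 * 127 bytes beyond tcp_wnd_end() is still treated as acceptable.
 */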
/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tcp_snd_cwnd(tp);

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
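/* Worked example for the loop above: with cwnd == 40, restart_cwnd == 10
 * and delta just over 3 RTOs, cwnd is halved twice (40 -> 20 -> 10), the
 * loop stops once cwnd reaches restart_cwnd, and tcp_snd_cwnd_set() stores
 * max(10, 10) == 10.
 */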
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If this is a reply sent within ATO of the last received
	 * packet, increase the pingpong count.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack);
		tp->compressed_ack = 0;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window */
		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
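/* Worked example of the scaling choice above: if the effective space comes
 * to 4 MB (2^22 bytes), then ilog2(space) == 22 and rcv_wscale ==
 * clamp(22 - 15, 0, TCP_MAX_WSCALE) == 7, which is just enough for the
 * 16-bit window field, shifted left by 7, to cover the 4 MB window.
 */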
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win, new_win;

	/* Make the window 0 if we failed to queue the data because we
	 * are out of memory. The window is temporary, so we don't store
	 * it on the socket.
	 */
	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM))
		return 0;

	cur_win = tcp_receive_window(tp);
	new_win = __tcp_select_window(sk);
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
			/* Never shrink the offered window */
			if (new_win == 0)
				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
		}
	}

	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}
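/* For illustration of the negotiation: an ECN-setup SYN (built by
 * tcp_ecn_send_syn() below) carries ECE|CWR, while the SYN-ACK answers
 * with ECE only, since tcp_ecn_send_synack() above clears CWR.
 */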
/* Packet ECN state for a SYN. */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
		       tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
		/* tp->ecn_flags are cleared at a later point in time,
		 * when the SYN-ACK is ultimately received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR.
		 */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	BIT(0)
#define OPTION_TS		BIT(1)
#define OPTION_MD5		BIT(2)
#define OPTION_WSCALE		BIT(3)
#define OPTION_FAST_OPEN_COOKIE	BIT(8)
#define OPTION_SMC		BIT(9)
#define OPTION_MPTCP		BIT(10)
#define OPTION_AO		BIT(11)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_EXP << 8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u8 bpf_opt_len;		/* length of BPF hdr option */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
	struct mptcp_out_options mptcp;
};

static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
				struct tcp_sock *tp,
				struct tcp_out_options *opts)
{
#if IS_ENABLED(CONFIG_MPTCP)
	if (unlikely(OPTION_MPTCP & opts->options))
		mptcp_write_options(th, ptr, tp, &opts->mptcp);
#endif
}

#ifdef CONFIG_CGROUP_BPF
static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
					enum tcp_synack_type synack_type)
{
	if (unlikely(!skb))
		return BPF_WRITE_HDR_TCP_CURRENT_MSS;

	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;

	return 0;
}

/* req, syn_skb and synack_type are used when writing synack */
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
	    !*remaining)
		return;

	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */

	/* init sock_ops */
	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;

	if (req) {
		/* The listen "sk" cannot be passed here because
		 * it is not locked. It would not make much sense
		 * to do bpf_setsockopt(listen_sk) based on an
		 * individual connection request either.
		 *
		 * Thus, "req" is passed here and the cgroup-bpf-progs
		 * of the listen "sk" will be run.
		 *
		 * "req" is also used here for fastopen, even though
		 * the "sk" here is a fullsock "child" sk.
		 * This keeps the behavior consistent between
		 * fastopen and non-fastopen on the bpf
		 * programming side.
		 */
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = *remaining;
	/* tcp_current_mss() does not pass a skb */
	if (skb)
		bpf_skops_init_skb(&sock_ops, skb, 0);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err || sock_ops.remaining_opt_len == *remaining)
		return;

	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
	/* round up to 4 bytes */
	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;

	*remaining -= opts->bpf_opt_len;
}
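/* In short, these two helpers form a reserve/commit pair:
 * bpf_skops_hdr_opt_len() above runs the BPF program once to learn how
 * much option space to reserve (rounded up to 4 bytes), and
 * bpf_skops_write_hdr_opt() below runs it again to fill that space,
 * NOP-padding whatever the program leaves unwritten.
 */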
static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!max_opt_len))
		return;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;

	if (req) {
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = max_opt_len;
	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err)
		nr_written = 0;
	else
		nr_written = max_opt_len - sock_ops.remaining_opt_len;

	if (nr_written < max_opt_len)
		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
		       max_opt_len - nr_written);
}
#else
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
}

static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
}
#endif

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
			      struct tcp_out_options *opts,
			      struct tcp_key *key)
{
	__be32 *ptr = (__be32 *)(th + 1);
	u16 options = opts->options;	/* mungable copy */

	if (tcp_key_is_md5(key)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	} else if (tcp_key_is_ao(key)) {
#ifdef CONFIG_TCP_AO
		struct tcp_ao_key *rnext_key;
		struct tcp_ao_info *ao_info;
		u8 maclen;

		ao_info = rcu_dereference_check(tp->ao_info,
			lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
		rnext_key = READ_ONCE(ao_info->rnext_key);
		if (WARN_ON_ONCE(!rnext_key))
			goto out_ao;
		maclen = tcp_ao_maclen(key->ao_key);
		*ptr++ = htonl((TCPOPT_AO << 24) |
			       (tcp_ao_len(key->ao_key) << 16) |
			       (key->ao_key->sndid << 8) |
			       (rnext_key->rcvid));
		opts->hash_location = (__u8 *)ptr;
		ptr += maclen / sizeof(*ptr);
		if (unlikely(maclen % sizeof(*ptr))) {
			memset(ptr, TCPOPT_NOP, sizeof(*ptr));
			ptr++;
		}
out_ao:
#endif
	}
	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}
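	/* For illustration, the NOP/NOP/TIMESTAMP word above is
	 * htonl((1 << 24) | (1 << 16) | (8 << 8) | 10), i.e. the bytes
	 * 01 01 08 0a on the wire, followed by the two 32-bit
	 * timestamp values.
	 */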
	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);

	mptcp_options_write(th, ptr, tp, opts);
}

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void mptcp_set_option_cond(const struct request_sock *req,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	if (rsk_is_mptcp(req)) {
		unsigned int size;

		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
			if (*remaining >= size) {
				opts->options |= OPTION_MPTCP;
				*remaining -= size;
			}
		}
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_key *key)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
	bool timestamps;

	/* Better than switch (key.type) as it has static branches */
	if (tcp_key_is_md5(key)) {
		timestamps = false;
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	} else {
		timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
		if (tcp_key_is_ao(key)) {
			opts->options |= OPTION_AO;
			remaining -= tcp_ao_len(key->ao_key);
		}
	}

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be included in
	 * the MSS advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(timestamps)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	if (sk_is_mptcp(sk)) {
		unsigned int size;

		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
			opts->options |= OPTION_MPTCP;
			remaining -= size;
		}
	}

	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}
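/* Budget illustration, using the TCPOLEN_* constants above:
 * MAX_TCP_OPTION_SPACE is 40 bytes, so a typical SYN spends
 * 4 (MSS) + 12 (timestamps, whose word also carries SACK_PERM) +
 * 4 (window scale) = 20 bytes, leaving 20 for Fast Open, MPTCP,
 * SMC or BPF-written options.
 */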
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc,
				       enum tcp_synack_type synack_type,
				       struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		if (synack_type != TCP_SYNACK_COOKIE)
			ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
			      tcp_rsk(req)->ts_off;
		opts->tsecr = READ_ONCE(req->ts_recent);
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	mptcp_set_option_cond(req, opts, &remaining);

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
			      synack_type, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_key *key)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	/* Better than switch (key.type) as it has static branches */
	if (tcp_key_is_md5(key)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	} else if (tcp_key_is_ao(key)) {
		opts->options |= OPTION_AO;
		size += tcp_ao_len(key->ao_key);
	}

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
				    tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* MPTCP options have precedence over SACK for the limited TCP
	 * option space because an MPTCP connection would be forced to
	 * fall back to regular TCP if a required multipath option is
	 * missing. SACK still gets a chance to use whatever space is
	 * left.
	 */
	if (sk_is_mptcp(sk)) {
		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		unsigned int opt_size = 0;

		if (mptcp_established_options(sk, skb, &opt_size, remaining,
					      &opts->mptcp)) {
			opts->options |= OPTION_MPTCP;
			size += opt_size;
		}
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;

		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
					 TCPOLEN_SACK_PERBLOCK))
			return size;

		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);

		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;

		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);

		size = MAX_TCP_OPTION_SPACE - remaining;
	}

	return size;
}
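/* Sizing note for the SACK logic above: with timestamps in use,
 * remaining == 40 - 12 == 28 bytes, so num_sack_blocks is capped at
 * (28 - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK ==
 * (28 - 4) / 8 == 3 blocks per ACK.
 */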
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues
 * (qdisc + dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}

/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(struct tasklet_struct *t)
{
	struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED |	\
			  TCPF_ACK_DEFERRED)
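/* Example of the deferral pattern these flags implement: a handler that
 * fires while the socket is locked by user context sets its
 * TCPF_*_DEFERRED bit instead of doing the work; the eventual
 * release_sock() calls tcp_release_cb() below, which runs the
 * corresponding handler once the socket is safe to touch.
 */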
/** 110846d3ceabSEric Dumazet * tcp_release_cb - tcp release_sock() callback 110946d3ceabSEric Dumazet * @sk: socket 111046d3ceabSEric Dumazet * 111146d3ceabSEric Dumazet * called from release_sock() to perform protocol dependent 111246d3ceabSEric Dumazet * actions before socket release. 111346d3ceabSEric Dumazet */ 111446d3ceabSEric Dumazet void tcp_release_cb(struct sock *sk) 111546d3ceabSEric Dumazet { 1116fac30731SEric Dumazet unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); 1117fac30731SEric Dumazet unsigned long nflags; 111846d3ceabSEric Dumazet 11196f458dfbSEric Dumazet /* perform an atomic operation only if at least one flag is set */ 11206f458dfbSEric Dumazet do { 11216f458dfbSEric Dumazet if (!(flags & TCP_DEFERRED_ALL)) 11226f458dfbSEric Dumazet return; 11236f458dfbSEric Dumazet nflags = flags & ~TCP_DEFERRED_ALL; 1124fac30731SEric Dumazet } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); 11256f458dfbSEric Dumazet 112673a6bab5SEric Dumazet if (flags & TCPF_TSQ_DEFERRED) { 112773a6bab5SEric Dumazet tcp_tsq_write(sk); 112873a6bab5SEric Dumazet __sock_put(sk); 112973a6bab5SEric Dumazet } 1130c3f9b018SEric Dumazet 113140fc3423SEric Dumazet if (flags & TCPF_WRITE_TIMER_DEFERRED) { 11326f458dfbSEric Dumazet tcp_write_timer_handler(sk); 1133144d56e9SEric Dumazet __sock_put(sk); 1134144d56e9SEric Dumazet } 113540fc3423SEric Dumazet if (flags & TCPF_DELACK_TIMER_DEFERRED) { 11366f458dfbSEric Dumazet tcp_delack_timer_handler(sk); 1137144d56e9SEric Dumazet __sock_put(sk); 1138144d56e9SEric Dumazet } 113940fc3423SEric Dumazet if (flags & TCPF_MTU_REDUCED_DEFERRED) { 11404fab9071SNeal Cardwell inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); 1141144d56e9SEric Dumazet __sock_put(sk); 1142144d56e9SEric Dumazet } 1143133c4c0dSEric Dumazet if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk)) 1144133c4c0dSEric Dumazet tcp_send_ack(sk); 114546d3ceabSEric Dumazet } 114646d3ceabSEric Dumazet EXPORT_SYMBOL(tcp_release_cb); 114746d3ceabSEric Dumazet 114846d3ceabSEric Dumazet void __init tcp_tasklet_init(void) 114946d3ceabSEric Dumazet { 115046d3ceabSEric Dumazet int i; 115146d3ceabSEric Dumazet 115246d3ceabSEric Dumazet for_each_possible_cpu(i) { 115346d3ceabSEric Dumazet struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); 115446d3ceabSEric Dumazet 115546d3ceabSEric Dumazet INIT_LIST_HEAD(&tsq->head); 1156c6533ca8SAllen Pais tasklet_setup(&tsq->tasklet, tcp_tasklet_func); 115746d3ceabSEric Dumazet } 115846d3ceabSEric Dumazet } 115946d3ceabSEric Dumazet 116046d3ceabSEric Dumazet /* 116146d3ceabSEric Dumazet * Write buffer destructor automatically called from kfree_skb. 11628e3bff96Sstephen hemminger * We can't xmit new skbs from this context, as we might already 116346d3ceabSEric Dumazet * hold qdisc lock. 116446d3ceabSEric Dumazet */ 1165d6a4a104SEric Dumazet void tcp_wfree(struct sk_buff *skb) 116646d3ceabSEric Dumazet { 116746d3ceabSEric Dumazet struct sock *sk = skb->sk; 116846d3ceabSEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 1169408f0a6cSEric Dumazet unsigned long flags, nval, oval; 1170b548b17aSEric Dumazet struct tsq_tasklet *tsq; 1171b548b17aSEric Dumazet bool empty; 11729b462d02SEric Dumazet 11739b462d02SEric Dumazet /* Keep one reference on sk_wmem_alloc. 
11749b462d02SEric Dumazet * Will be released by sk_free() from here or tcp_tasklet_func() 11759b462d02SEric Dumazet */ 117614afee4bSReshetova, Elena WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); 11779b462d02SEric Dumazet 11789b462d02SEric Dumazet /* If this softirq is serviced by ksoftirqd, we are likely under stress. 11799b462d02SEric Dumazet * Wait until our queues (qdisc + devices) are drained. 11809b462d02SEric Dumazet * This gives: 11819b462d02SEric Dumazet * - fewer callbacks to tcp_write_xmit(), reducing stress (batches) 11829b462d02SEric Dumazet * - chance for incoming ACK (processed by another cpu maybe) 11839b462d02SEric Dumazet * to migrate this flow (skb->ooo_okay will be eventually set) 11849b462d02SEric Dumazet */ 118514afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) 11869b462d02SEric Dumazet goto out; 118746d3ceabSEric Dumazet 1188b548b17aSEric Dumazet oval = smp_load_acquire(&sk->sk_tsq_flags); 1189b548b17aSEric Dumazet do { 1190408f0a6cSEric Dumazet if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) 1191408f0a6cSEric Dumazet goto out; 1192408f0a6cSEric Dumazet 119373a6bab5SEric Dumazet nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; 1194b548b17aSEric Dumazet } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); 1195408f0a6cSEric Dumazet 119646d3ceabSEric Dumazet /* queue this socket to tasklet queue */ 119746d3ceabSEric Dumazet local_irq_save(flags); 1198903ceff7SChristoph Lameter tsq = this_cpu_ptr(&tsq_tasklet); 1199a9b204d1SEric Dumazet empty = list_empty(&tsq->head); 120046d3ceabSEric Dumazet list_add(&tp->tsq_node, &tsq->head); 1201a9b204d1SEric Dumazet if (empty) 120246d3ceabSEric Dumazet tasklet_schedule(&tsq->tasklet); 120346d3ceabSEric Dumazet local_irq_restore(flags); 12049b462d02SEric Dumazet return; 12059b462d02SEric Dumazet out: 12069b462d02SEric Dumazet sk_free(sk); 120746d3ceabSEric Dumazet } 120846d3ceabSEric Dumazet 120973a6bab5SEric Dumazet /* Note: Called under soft irq. 121073a6bab5SEric Dumazet * We can call TCP stack right away, unless socket is owned by user. 1211218af599SEric Dumazet */ 1212218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) 1213218af599SEric Dumazet { 1214218af599SEric Dumazet struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); 1215218af599SEric Dumazet struct sock *sk = (struct sock *)tp; 1216218af599SEric Dumazet 121773a6bab5SEric Dumazet tcp_tsq_handler(sk); 121873a6bab5SEric Dumazet sock_put(sk); 1219218af599SEric Dumazet 1220218af599SEric Dumazet return HRTIMER_NORESTART; 1221218af599SEric Dumazet } 1222218af599SEric Dumazet 1223a7a25630SEric Dumazet static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, 1224a7a25630SEric Dumazet u64 prior_wstamp) 1225e2080072SEric Dumazet { 1226ab408b6dSEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 1227ab408b6dSEric Dumazet 1228ab408b6dSEric Dumazet if (sk->sk_pacing_status != SK_PACING_NONE) { 122928b24f90SEric Dumazet unsigned long rate = READ_ONCE(sk->sk_pacing_rate); 1230ab408b6dSEric Dumazet 1231ab408b6dSEric Dumazet /* Original sch_fq does not pace first 10 MSS 1232ab408b6dSEric Dumazet * Note that tp->data_segs_out overflows after 2^32 packets, 1233ab408b6dSEric Dumazet * this is a minor annoyance.
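 * For example (illustrative numbers): a 64 KB TSO skb paced at 1 GB/s
 * costs len_ns = 65536 * NSEC_PER_SEC / 1000000000 = 65536 ns; if 20000 ns
 * of real time already elapsed since the previous send (the credit),
 * only 65536 - min(65536 / 2, 20000) = 45536 ns are added to
 * tcp_wstamp_ns.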
1234ab408b6dSEric Dumazet */ 123576a9ebe8SEric Dumazet if (rate != ~0UL && rate && tp->data_segs_out >= 10) { 1236a7a25630SEric Dumazet u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); 1237a7a25630SEric Dumazet u64 credit = tp->tcp_wstamp_ns - prior_wstamp; 1238a7a25630SEric Dumazet 1239a7a25630SEric Dumazet /* take into account OS jitter */ 1240a7a25630SEric Dumazet len_ns -= min_t(u64, len_ns / 2, credit); 1241a7a25630SEric Dumazet tp->tcp_wstamp_ns += len_ns; 1242ab408b6dSEric Dumazet } 1243ab408b6dSEric Dumazet } 1244e2080072SEric Dumazet list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 1245e2080072SEric Dumazet } 1246e2080072SEric Dumazet 124705e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 124805e22e83SEric Dumazet INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 1249dd2e0b86SEric Dumazet INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); 125005e22e83SEric Dumazet 12511da177e4SLinus Torvalds /* This routine actually transmits TCP packets queued in by 12521da177e4SLinus Torvalds * tcp_do_sendmsg(). This is used by both the initial 12531da177e4SLinus Torvalds * transmission and possible later retransmissions. 12541da177e4SLinus Torvalds * All SKB's seen here are completely headerless. It is our 12551da177e4SLinus Torvalds * job to build the TCP header, and pass the packet down to 12561da177e4SLinus Torvalds * IP so it can do the same plus pass the packet off to the 12571da177e4SLinus Torvalds * device. 12581da177e4SLinus Torvalds * 12591da177e4SLinus Torvalds * We are working here with either a clone of the original 12601da177e4SLinus Torvalds * SKB, or a fresh unique copy made by the retransmit engine. 12611da177e4SLinus Torvalds */ 12622987babbSYuchung Cheng static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, 12632987babbSYuchung Cheng int clone_it, gfp_t gfp_mask, u32 rcv_nxt) 12641da177e4SLinus Torvalds { 12656687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1266dfb4b9dcSDavid S. Miller struct inet_sock *inet; 1267dfb4b9dcSDavid S. Miller struct tcp_sock *tp; 1268dfb4b9dcSDavid S. Miller struct tcp_skb_cb *tcb; 126933ad798cSAdam Langley struct tcp_out_options opts; 127095c96174SEric Dumazet unsigned int tcp_options_size, tcp_header_size; 12718c72c65bSEric Dumazet struct sk_buff *oskb = NULL; 12721e03d32bSDmitry Safonov struct tcp_key key; 12731da177e4SLinus Torvalds struct tcphdr *th; 1274a7a25630SEric Dumazet u64 prior_wstamp; 12751da177e4SLinus Torvalds int err; 12761da177e4SLinus Torvalds 1277dfb4b9dcSDavid S. Miller BUG_ON(!skb || !tcp_skb_pcount(skb)); 12786f094b9eSLawrence Brakmo tp = tcp_sk(sk); 12797f12422cSYuchung Cheng prior_wstamp = tp->tcp_wstamp_ns; 12807f12422cSYuchung Cheng tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); 1281a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); 1282ccdbb6e9SEric Dumazet if (clone_it) { 12838c72c65bSEric Dumazet oskb = skb; 1284e2080072SEric Dumazet 1285e2080072SEric Dumazet tcp_skb_tsorted_save(oskb) { 1286e2080072SEric Dumazet if (unlikely(skb_cloned(oskb))) 1287e2080072SEric Dumazet skb = pskb_copy(oskb, gfp_mask); 1288dfb4b9dcSDavid S. Miller else 1289e2080072SEric Dumazet skb = skb_clone(oskb, gfp_mask); 1290e2080072SEric Dumazet } tcp_skb_tsorted_restore(oskb); 1291e2080072SEric Dumazet 1292dfb4b9dcSDavid S. 
Miller if (unlikely(!skb)) 1293dfb4b9dcSDavid S. Miller return -ENOBUFS; 1294b738a185SEric Dumazet /* retransmit skbs might have a non zero value in skb->dev 1295b738a185SEric Dumazet * because skb->dev is aliased with skb->rbnode.rb_left 1296b738a185SEric Dumazet */ 1297b738a185SEric Dumazet skb->dev = NULL; 1298dfb4b9dcSDavid S. Miller } 12995f6188a8SEric Dumazet 1300dfb4b9dcSDavid S. Miller inet = inet_sk(sk); 1301dfb4b9dcSDavid S. Miller tcb = TCP_SKB_CB(skb); 130233ad798cSAdam Langley memset(&opts, 0, sizeof(opts)); 13031da177e4SLinus Torvalds 13041e03d32bSDmitry Safonov tcp_get_current_key(sk, &key); 1305051ba674SEric Dumazet if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { 13061e03d32bSDmitry Safonov tcp_options_size = tcp_syn_options(sk, skb, &opts, &key); 1307051ba674SEric Dumazet } else { 13081e03d32bSDmitry Safonov tcp_options_size = tcp_established_options(sk, skb, &opts, &key); 1309051ba674SEric Dumazet /* Force a PSH flag on all (GSO) packets to expedite GRO flush 1310051ba674SEric Dumazet * at the receiver: this slightly improves GRO performance. 1311051ba674SEric Dumazet * Note that we do not force the PSH flag for non-GSO packets, 1312051ba674SEric Dumazet * because they might be sent under high congestion events, 1313051ba674SEric Dumazet * and in this case it is better to delay the delivery of 1-MSS 1314051ba674SEric Dumazet * packets and thus the corresponding ACK packet that would 1315051ba674SEric Dumazet * release the following packet. 1316051ba674SEric Dumazet */ 1317051ba674SEric Dumazet if (tcp_skb_pcount(skb) > 1) 1318051ba674SEric Dumazet tcb->tcp_flags |= TCPHDR_PSH; 1319051ba674SEric Dumazet } 132033ad798cSAdam Langley tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 13211da177e4SLinus Torvalds 1322726e9e8bSEric Dumazet /* We set skb->ooo_okay to one if this packet can select 1323726e9e8bSEric Dumazet * a different TX queue than prior packets of this flow, 1324726e9e8bSEric Dumazet * to avoid self-inflicted reorders. 1325726e9e8bSEric Dumazet * The 'other' queue decision is based on current cpu number 1326726e9e8bSEric Dumazet * if XPS is enabled, or sk->sk_txhash otherwise. 1327726e9e8bSEric Dumazet * We can switch to another (and better) queue if: 1328726e9e8bSEric Dumazet * 1) No packet with payload is in qdisc/device queues. 1329726e9e8bSEric Dumazet * Delays in TX completion can defeat the test 1330726e9e8bSEric Dumazet * even if packets were already sent. 1331726e9e8bSEric Dumazet * 2) Or rtx queue is empty. 1332726e9e8bSEric Dumazet * This mitigates above case if ACK packets for 1333726e9e8bSEric Dumazet * all prior packets were already processed. 1334547669d4SEric Dumazet */ 1335726e9e8bSEric Dumazet skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || 1336726e9e8bSEric Dumazet tcp_rtx_queue_empty(sk); 13371da177e4SLinus Torvalds 133838ab52e8SEric Dumazet /* If we had to use memory reserve to allocate this skb, 133938ab52e8SEric Dumazet * this might cause drops if the packet is looped back: 134038ab52e8SEric Dumazet * Other socket might not have SOCK_MEMALLOC. 134138ab52e8SEric Dumazet * Packets not looped back do not care about pfmemalloc.
134238ab52e8SEric Dumazet */ 134338ab52e8SEric Dumazet skb->pfmemalloc = 0; 134438ab52e8SEric Dumazet 1345aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size); 1346aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb); 134746d3ceabSEric Dumazet 134846d3ceabSEric Dumazet skb_orphan(skb); 134946d3ceabSEric Dumazet skb->sk = sk; 13501d2077acSEric Dumazet skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; 135114afee4bSReshetova, Elena refcount_add(skb->truesize, &sk->sk_wmem_alloc); 13521da177e4SLinus Torvalds 1353eb44ad4eSEric Dumazet skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); 1354c3a2e837SJulian Anastasov 13551da177e4SLinus Torvalds /* Build TCP header and checksum it. */ 1356ea1627c2SEric Dumazet th = (struct tcphdr *)skb->data; 1357c720c7e8SEric Dumazet th->source = inet->inet_sport; 1358c720c7e8SEric Dumazet th->dest = inet->inet_dport; 13591da177e4SLinus Torvalds th->seq = htonl(tcb->seq); 13602987babbSYuchung Cheng th->ack_seq = htonl(rcv_nxt); 1361df7a3b07SAl Viro *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 13624de075e0SEric Dumazet tcb->tcp_flags); 1363dfb4b9dcSDavid S. Miller 13641da177e4SLinus Torvalds th->check = 0; 13651da177e4SLinus Torvalds th->urg_ptr = 0; 13661da177e4SLinus Torvalds 136733f5f57eSIlpo Järvinen /* The urg_mode check is necessary during a below snd_una win probe */ 13687691367dSHerbert Xu if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 13697691367dSHerbert Xu if (before(tp->snd_up, tcb->seq + 0x10000)) { 13701da177e4SLinus Torvalds th->urg_ptr = htons(tp->snd_up - tcb->seq); 13711da177e4SLinus Torvalds th->urg = 1; 13727691367dSHerbert Xu } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 13730eae88f3SEric Dumazet th->urg_ptr = htons(0xFFFF); 13747691367dSHerbert Xu th->urg = 1; 13757691367dSHerbert Xu } 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 137851466a75SEric Dumazet skb_shinfo(skb)->gso_type = sk->sk_gso_type; 1379ea1627c2SEric Dumazet if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { 1380ea1627c2SEric Dumazet th->window = htons(tcp_select_window(sk)); 1381ea1627c2SEric Dumazet tcp_ecn_send(sk, skb, th, tcp_header_size); 1382ea1627c2SEric Dumazet } else { 1383ea1627c2SEric Dumazet /* RFC1323: The window in SYN & SYN/ACK segments 1384ea1627c2SEric Dumazet * is never scaled. 
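 * For example, a receiver with rcv_wnd of 250000 bytes and rcv_wscale
 * of 7 (illustrative values) still advertises only 65535 here; the
 * scale factor applies to windows advertised after the handshake.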
1385ea1627c2SEric Dumazet */ 1386ea1627c2SEric Dumazet th->window = htons(min(tp->rcv_wnd, 65535U)); 1387ea1627c2SEric Dumazet } 1388fa3fe2b1SFlorian Westphal 13891e03d32bSDmitry Safonov tcp_options_write(th, tp, &opts, &key); 1390fa3fe2b1SFlorian Westphal 13911e03d32bSDmitry Safonov if (tcp_key_is_md5(&key)) { 1392cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 1393cfb6eeb4SYOSHIFUJI Hideaki /* Calculate the MD5 hash, as we have all we need now */ 1394aba54656SEric Dumazet sk_gso_disable(sk); 1395bd0388aeSWilliam Allen Simpson tp->af_specific->calc_md5_hash(opts.hash_location, 13961e03d32bSDmitry Safonov key.md5_key, sk, skb); 1397cfb6eeb4SYOSHIFUJI Hideaki #endif 13981e03d32bSDmitry Safonov } else if (tcp_key_is_ao(&key)) { 13991e03d32bSDmitry Safonov int err; 14001e03d32bSDmitry Safonov 14011e03d32bSDmitry Safonov err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th, 14021e03d32bSDmitry Safonov opts.hash_location); 14031e03d32bSDmitry Safonov if (err) { 14041e03d32bSDmitry Safonov kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); 14051e03d32bSDmitry Safonov return -ENOMEM; 14061e03d32bSDmitry Safonov } 14071e03d32bSDmitry Safonov } 1408cfb6eeb4SYOSHIFUJI Hideaki 1409331fca43SMartin KaFai Lau /* BPF prog is the last one writing header option */ 1410331fca43SMartin KaFai Lau bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts); 1411331fca43SMartin KaFai Lau 1412dd2e0b86SEric Dumazet INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, 1413dd2e0b86SEric Dumazet tcp_v6_send_check, tcp_v4_send_check, 1414dd2e0b86SEric Dumazet sk, skb); 14151da177e4SLinus Torvalds 14164de075e0SEric Dumazet if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1417059217c1SNeal Cardwell tcp_event_ack_sent(sk, rcv_nxt); 14181da177e4SLinus Torvalds 1419a44d6eacSMartin KaFai Lau if (skb->len != tcp_header_size) { 1420cf533ea5SEric Dumazet tcp_event_data_sent(tp, sk); 1421a44d6eacSMartin KaFai Lau tp->data_segs_out += tcp_skb_pcount(skb); 1422ba113c3aSWei Wang tp->bytes_sent += skb->len - tcp_header_size; 1423a44d6eacSMartin KaFai Lau } 14241da177e4SLinus Torvalds 1425bd37a088SWei Yongjun if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 1426aa2ea058STom Herbert TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 1427aa2ea058STom Herbert tcp_skb_pcount(skb)); 14281da177e4SLinus Torvalds 14292efd055cSMarcelo Ricardo Leitner tp->segs_out += tcp_skb_pcount(skb); 14300ae5b43dSYuchung Cheng skb_set_hash_from_sk(skb, sk); 1431f69ad292SEric Dumazet /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */ 1432cd7d8498SEric Dumazet skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); 1433f69ad292SEric Dumazet skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1434cd7d8498SEric Dumazet 1435d3edd06eSEric Dumazet /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ 1436971f10ecSEric Dumazet 1437971f10ecSEric Dumazet /* Cleanup our debris for IP stacks */ 1438971f10ecSEric Dumazet memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), 1439971f10ecSEric Dumazet sizeof(struct inet6_skb_parm))); 1440971f10ecSEric Dumazet 1441a842fe14SEric Dumazet tcp_add_tx_delay(skb, tp); 1442a842fe14SEric Dumazet 144305e22e83SEric Dumazet err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, 144405e22e83SEric Dumazet inet6_csk_xmit, ip_queue_xmit, 144505e22e83SEric Dumazet sk, skb, &inet->cork.fl); 14467faee5c0SEric Dumazet 14478c72c65bSEric Dumazet if (unlikely(err > 0)) { 14485ee2c941SChristoph Paasch tcp_enter_cwr(sk); 14498c72c65bSEric Dumazet err = net_xmit_eval(err); 14508c72c65bSEric Dumazet } 1451fc225799SEric Dumazet if
(!err && oskb) { 1452a7a25630SEric Dumazet tcp_update_skb_after_send(sk, oskb, prior_wstamp); 1453fc225799SEric Dumazet tcp_rate_skb_sent(sk, oskb); 1454fc225799SEric Dumazet } 14558c72c65bSEric Dumazet return err; 14561da177e4SLinus Torvalds } 14571da177e4SLinus Torvalds 14582987babbSYuchung Cheng static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 14592987babbSYuchung Cheng gfp_t gfp_mask) 14602987babbSYuchung Cheng { 14612987babbSYuchung Cheng return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, 14622987babbSYuchung Cheng tcp_sk(sk)->rcv_nxt); 14632987babbSYuchung Cheng } 14642987babbSYuchung Cheng 146567edfef7SAndi Kleen /* This routine just queues the buffer for sending. 14661da177e4SLinus Torvalds * 14671da177e4SLinus Torvalds * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 14681da177e4SLinus Torvalds * otherwise socket can stall. 14691da177e4SLinus Torvalds */ 14701da177e4SLinus Torvalds static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 14711da177e4SLinus Torvalds { 14721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 14731da177e4SLinus Torvalds 14741da177e4SLinus Torvalds /* Advance write_seq and place onto the write_queue. */ 14750f317464SEric Dumazet WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); 1476f4a775d1SEric Dumazet __skb_header_release(skb); 1477fe067e8aSDavid S. Miller tcp_add_write_queue_tail(sk, skb); 1478ab4e846aSEric Dumazet sk_wmem_queued_add(sk, skb->truesize); 14793ab224beSHideo Aoki sk_mem_charge(sk, skb->truesize); 14801da177e4SLinus Torvalds } 14811da177e4SLinus Torvalds 148267edfef7SAndi Kleen /* Initialize TSO segments for a packet. */ 14835bbb432cSEric Dumazet static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1484f6302d1dSDavid S. Miller { 14854a64fd6cSEric Dumazet if (skb->len <= mss_now) { 1486f6302d1dSDavid S. Miller /* Avoid the costly divide in the normal 1487f6302d1dSDavid S. Miller * non-TSO case. 1488f6302d1dSDavid S. Miller */ 1489cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 1); 1490f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = 0; 1491f6302d1dSDavid S. Miller } else { 1492cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); 1493f69ad292SEric Dumazet TCP_SKB_CB(skb)->tcp_gso_size = mss_now; 14941da177e4SLinus Torvalds } 14951da177e4SLinus Torvalds } 14961da177e4SLinus Torvalds 1497797108d1SIlpo Järvinen /* Pcount in the middle of the write queue got changed, we need to do various 1498797108d1SIlpo Järvinen * tweaks to fix counters 1499797108d1SIlpo Järvinen */ 1500cf533ea5SEric Dumazet static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 1501797108d1SIlpo Järvinen { 1502797108d1SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1503797108d1SIlpo Järvinen 1504797108d1SIlpo Järvinen tp->packets_out -= decr; 1505797108d1SIlpo Järvinen 1506797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1507797108d1SIlpo Järvinen tp->sacked_out -= decr; 1508797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1509797108d1SIlpo Järvinen tp->retrans_out -= decr; 1510797108d1SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 1511797108d1SIlpo Järvinen tp->lost_out -= decr; 1512797108d1SIlpo Järvinen 1513797108d1SIlpo Järvinen /* Reno case is special. Sigh... 
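 * (Without SACK, sacked_out is an estimate derived from duplicate ACKs
 * rather than a per-segment mark, so it can only be clamped, never
 * adjusted exactly.)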
*/ 1514797108d1SIlpo Järvinen if (tcp_is_reno(tp) && decr > 0) 1515797108d1SIlpo Järvinen tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 1516797108d1SIlpo Järvinen 1517797108d1SIlpo Järvinen if (tp->lost_skb_hint && 1518797108d1SIlpo Järvinen before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 1519713bafeaSYuchung Cheng (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 1520797108d1SIlpo Järvinen tp->lost_cnt_hint -= decr; 1521797108d1SIlpo Järvinen 1522797108d1SIlpo Järvinen tcp_verify_left_out(tp); 1523797108d1SIlpo Järvinen } 1524797108d1SIlpo Järvinen 15250a2cf20cSSoheil Hassas Yeganeh static bool tcp_has_tx_tstamp(const struct sk_buff *skb) 15260a2cf20cSSoheil Hassas Yeganeh { 15270a2cf20cSSoheil Hassas Yeganeh return TCP_SKB_CB(skb)->txstamp_ack || 15280a2cf20cSSoheil Hassas Yeganeh (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); 15290a2cf20cSSoheil Hassas Yeganeh } 15300a2cf20cSSoheil Hassas Yeganeh 1531490cc7d0SWillem de Bruijn static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) 1532490cc7d0SWillem de Bruijn { 1533490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo = skb_shinfo(skb); 1534490cc7d0SWillem de Bruijn 15350a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(skb)) && 1536490cc7d0SWillem de Bruijn !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { 1537490cc7d0SWillem de Bruijn struct skb_shared_info *shinfo2 = skb_shinfo(skb2); 1538490cc7d0SWillem de Bruijn u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; 1539490cc7d0SWillem de Bruijn 1540490cc7d0SWillem de Bruijn shinfo->tx_flags &= ~tsflags; 1541490cc7d0SWillem de Bruijn shinfo2->tx_flags |= tsflags; 1542490cc7d0SWillem de Bruijn swap(shinfo->tskey, shinfo2->tskey); 1543b51e13faSMartin KaFai Lau TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; 1544b51e13faSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack = 0; 1545490cc7d0SWillem de Bruijn } 1546490cc7d0SWillem de Bruijn } 1547490cc7d0SWillem de Bruijn 1548a166140eSMartin KaFai Lau static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) 1549a166140eSMartin KaFai Lau { 1550a166140eSMartin KaFai Lau TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; 1551a166140eSMartin KaFai Lau TCP_SKB_CB(skb)->eor = 0; 1552a166140eSMartin KaFai Lau } 1553a166140eSMartin KaFai Lau 155475c119afSEric Dumazet /* Insert buff after skb on the write or rtx queue of sk. */ 155575c119afSEric Dumazet static void tcp_insert_write_queue_after(struct sk_buff *skb, 155675c119afSEric Dumazet struct sk_buff *buff, 155775c119afSEric Dumazet struct sock *sk, 155875c119afSEric Dumazet enum tcp_queue tcp_queue) 155975c119afSEric Dumazet { 156075c119afSEric Dumazet if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE) 156175c119afSEric Dumazet __skb_queue_after(&sk->sk_write_queue, skb, buff); 156275c119afSEric Dumazet else 156375c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); 156475c119afSEric Dumazet } 156575c119afSEric Dumazet 15661da177e4SLinus Torvalds /* Function to create two new TCP segments. Shrinks the given segment 15671da177e4SLinus Torvalds * to the specified size and appends a new segment with the rest of the 15681da177e4SLinus Torvalds * packet to the list. This won't be called frequently, I hope. 15691da177e4SLinus Torvalds * Remember, these are still headerless SKBs at this point. 
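 * For example (illustrative sequence numbers): splitting an skb covering
 * 1000..5344 at len = 1448 leaves the original skb covering 1000..2448,
 * while the new buff covers 2448..5344 and inherits any PSH/FIN flags.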
15701da177e4SLinus Torvalds */ 157175c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, 157275c119afSEric Dumazet struct sk_buff *skb, u32 len, 15736cc55e09SOctavian Purdila unsigned int mss_now, gfp_t gfp) 15741da177e4SLinus Torvalds { 15751da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15761da177e4SLinus Torvalds struct sk_buff *buff; 1577b4a24397SEric Dumazet int old_factor; 1578b617158dSEric Dumazet long limit; 1579b60b49eaSHerbert Xu int nlen; 15809ce01461SIlpo Järvinen u8 flags; 15811da177e4SLinus Torvalds 15822fceec13SIlpo Järvinen if (WARN_ON(len > skb->len)) 15832fceec13SIlpo Järvinen return -EINVAL; 15846a438bbeSStephen Hemminger 1585b4a24397SEric Dumazet DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); 15861da177e4SLinus Torvalds 1587b617158dSEric Dumazet /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. 1588b617158dSEric Dumazet * We need some allowance to not penalize applications setting small 1589b617158dSEric Dumazet * SO_SNDBUF values. 1590b617158dSEric Dumazet * Also allow first and last skb in retransmit queue to be split. 1591b617158dSEric Dumazet */ 15927c4e983cSAlexander Duyck limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); 1593b617158dSEric Dumazet if (unlikely((sk->sk_wmem_queued >> 1) > limit && 1594b617158dSEric Dumazet tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && 1595b617158dSEric Dumazet skb != tcp_rtx_queue_head(sk) && 1596b617158dSEric Dumazet skb != tcp_rtx_queue_tail(sk))) { 1597f070ef2aSEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); 1598f070ef2aSEric Dumazet return -ENOMEM; 1599f070ef2aSEric Dumazet } 1600f070ef2aSEric Dumazet 1601c4777efaSEric Dumazet if (skb_unclone_keeptruesize(skb, gfp)) 16021da177e4SLinus Torvalds return -ENOMEM; 16031da177e4SLinus Torvalds 16041da177e4SLinus Torvalds /* Get a new skb... force flag on. */ 16055882efffSEric Dumazet buff = tcp_stream_alloc_skb(sk, gfp, true); 160651456b29SIan Morris if (!buff) 16071da177e4SLinus Torvalds return -ENOMEM; /* We'll just try again later. */ 160841477662SJakub Kicinski skb_copy_decrypted(buff, skb); 16095a369ca6SPaolo Abeni mptcp_skb_ext_copy(buff, skb); 1610ef5cb973SHerbert Xu 1611ab4e846aSEric Dumazet sk_wmem_queued_add(sk, buff->truesize); 16123ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 1613b4a24397SEric Dumazet nlen = skb->len - len; 1614b60b49eaSHerbert Xu buff->truesize += nlen; 1615b60b49eaSHerbert Xu skb->truesize -= nlen; 16161da177e4SLinus Torvalds 16171da177e4SLinus Torvalds /* Correct the sequence numbers. */ 16181da177e4SLinus Torvalds TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 16191da177e4SLinus Torvalds TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 16201da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 16211da177e4SLinus Torvalds 16221da177e4SLinus Torvalds /* PSH and FIN should only be set in the second packet. 
*/ 16234de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 16244de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 16254de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 1626e14c3cafSHerbert Xu TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1627a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 16281da177e4SLinus Torvalds 16291da177e4SLinus Torvalds skb_split(skb, buff, len); 16301da177e4SLinus Torvalds 1631a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(buff, skb->tstamp, true); 1632490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 16331da177e4SLinus Torvalds 16346475be16SDavid S. Miller old_factor = tcp_skb_pcount(skb); 16356475be16SDavid S. Miller 16361da177e4SLinus Torvalds /* Fix up tso_factor for both original and new SKB. */ 16375bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 16385bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 16391da177e4SLinus Torvalds 1640b9f64820SYuchung Cheng /* Update delivered info for the new segment */ 1641b9f64820SYuchung Cheng TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; 1642b9f64820SYuchung Cheng 16436475be16SDavid S. Miller /* If this packet has been sent out already, we must 16446475be16SDavid S. Miller * adjust the various packet counters. 16456475be16SDavid S. Miller */ 1646cf0b450cSHerbert Xu if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 16476475be16SDavid S. Miller int diff = old_factor - tcp_skb_pcount(skb) - 16486475be16SDavid S. Miller tcp_skb_pcount(buff); 16491da177e4SLinus Torvalds 1650797108d1SIlpo Järvinen if (diff) 1651797108d1SIlpo Järvinen tcp_adjust_pcount(sk, skb, diff); 16521da177e4SLinus Torvalds } 16531da177e4SLinus Torvalds 16541da177e4SLinus Torvalds /* Link BUFF into the send queue. */ 1655f4a775d1SEric Dumazet __skb_header_release(buff); 165675c119afSEric Dumazet tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); 1657f67971e6SEric Dumazet if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE) 1658e2080072SEric Dumazet list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); 16591da177e4SLinus Torvalds 16601da177e4SLinus Torvalds return 0; 16611da177e4SLinus Torvalds } 16621da177e4SLinus Torvalds 1663f4d01666SEric Dumazet /* This is similar to __pskb_pull_tail(). The difference is that pulled 1664f4d01666SEric Dumazet * data is not copied, but immediately discarded. 
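 * For example (illustrative sizes): trimming 1000 bytes from an skb whose
 * first two frags hold 700 and 1500 bytes releases the 700-byte frag and
 * advances 300 bytes into the second one, shrinking it to 1200 bytes.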
16651da177e4SLinus Torvalds */ 16667162fb24SEric Dumazet static int __pskb_trim_head(struct sk_buff *skb, int len) 16671da177e4SLinus Torvalds { 16687b7fc97aSEric Dumazet struct skb_shared_info *shinfo; 16691da177e4SLinus Torvalds int i, k, eat; 16701da177e4SLinus Torvalds 1671b4a24397SEric Dumazet DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); 16721da177e4SLinus Torvalds eat = len; 16731da177e4SLinus Torvalds k = 0; 16747b7fc97aSEric Dumazet shinfo = skb_shinfo(skb); 16757b7fc97aSEric Dumazet for (i = 0; i < shinfo->nr_frags; i++) { 16767b7fc97aSEric Dumazet int size = skb_frag_size(&shinfo->frags[i]); 16779e903e08SEric Dumazet 16789e903e08SEric Dumazet if (size <= eat) { 1679aff65da0SIan Campbell skb_frag_unref(skb, i); 16809e903e08SEric Dumazet eat -= size; 16811da177e4SLinus Torvalds } else { 16827b7fc97aSEric Dumazet shinfo->frags[k] = shinfo->frags[i]; 16831da177e4SLinus Torvalds if (eat) { 1684b54c9d5bSJonathan Lemon skb_frag_off_add(&shinfo->frags[k], eat); 16857b7fc97aSEric Dumazet skb_frag_size_sub(&shinfo->frags[k], eat); 16861da177e4SLinus Torvalds eat = 0; 16871da177e4SLinus Torvalds } 16881da177e4SLinus Torvalds k++; 16891da177e4SLinus Torvalds } 16901da177e4SLinus Torvalds } 16917b7fc97aSEric Dumazet shinfo->nr_frags = k; 16921da177e4SLinus Torvalds 16931da177e4SLinus Torvalds skb->data_len -= len; 16941da177e4SLinus Torvalds skb->len = skb->data_len; 16957162fb24SEric Dumazet return len; 16961da177e4SLinus Torvalds } 16971da177e4SLinus Torvalds 169867edfef7SAndi Kleen /* Remove acked data from a packet in the transmit queue. */ 16991da177e4SLinus Torvalds int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 17001da177e4SLinus Torvalds { 17017162fb24SEric Dumazet u32 delta_truesize; 17027162fb24SEric Dumazet 1703c4777efaSEric Dumazet if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) 17041da177e4SLinus Torvalds return -ENOMEM; 17051da177e4SLinus Torvalds 17067162fb24SEric Dumazet delta_truesize = __pskb_trim_head(skb, len); 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq += len; 17091da177e4SLinus Torvalds 17107162fb24SEric Dumazet skb->truesize -= delta_truesize; 1711ab4e846aSEric Dumazet sk_wmem_queued_add(sk, -delta_truesize); 17129b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) 17137162fb24SEric Dumazet sk_mem_uncharge(sk, delta_truesize); 17141da177e4SLinus Torvalds 17155b35e1e6SNeal Cardwell /* Any change of skb->len requires recalculation of tso factor. */ 17161da177e4SLinus Torvalds if (tcp_skb_pcount(skb) > 1) 17175bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); 17181da177e4SLinus Torvalds 17191da177e4SLinus Torvalds return 0; 17201da177e4SLinus Torvalds } 17211da177e4SLinus Torvalds 17221b63edd6SYuchung Cheng /* Calculate MSS not accounting any TCP options. 
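 * For example, with IPv4, no IP options and no extension headers, a PMTU
 * of 1500 yields 1500 - 20 - 20 = 1460 bytes; with IPv6 the network
 * header is 40 bytes, so the same PMTU yields 1440 (before the mss_clamp
 * and tcp_min_snd_mss bounds applied below).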
*/ 17231b63edd6SYuchung Cheng static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 17245d424d5aSJohn Heffner { 1725cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1726cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 17275d424d5aSJohn Heffner int mss_now; 17285d424d5aSJohn Heffner 17295d424d5aSJohn Heffner /* Calculate base mss without TCP options: 17305d424d5aSJohn Heffner It is MMS_S - sizeof(tcphdr) of rfc1122 17315d424d5aSJohn Heffner */ 17325d424d5aSJohn Heffner mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 17335d424d5aSJohn Heffner 17345d424d5aSJohn Heffner /* Clamp it (mss_clamp does not include tcp options) */ 17355d424d5aSJohn Heffner if (mss_now > tp->rx_opt.mss_clamp) 17365d424d5aSJohn Heffner mss_now = tp->rx_opt.mss_clamp; 17375d424d5aSJohn Heffner 17385d424d5aSJohn Heffner /* Now subtract optional transport overhead */ 17395d424d5aSJohn Heffner mss_now -= icsk->icsk_ext_hdr_len; 17405d424d5aSJohn Heffner 17415d424d5aSJohn Heffner /* Then reserve room for full set of TCP options and 8 bytes of data */ 174278eb166cSKuniyuki Iwashima mss_now = max(mss_now, 174378eb166cSKuniyuki Iwashima READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); 17445d424d5aSJohn Heffner return mss_now; 17455d424d5aSJohn Heffner } 17465d424d5aSJohn Heffner 17471b63edd6SYuchung Cheng /* Calculate MSS. Not accounting for SACKs here. */ 17481b63edd6SYuchung Cheng int tcp_mtu_to_mss(struct sock *sk, int pmtu) 17491b63edd6SYuchung Cheng { 17501b63edd6SYuchung Cheng /* Subtract TCP options size, not including SACKs */ 17511b63edd6SYuchung Cheng return __tcp_mtu_to_mss(sk, pmtu) - 17521b63edd6SYuchung Cheng (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 17531b63edd6SYuchung Cheng } 1754c7bb4b89SEric Dumazet EXPORT_SYMBOL(tcp_mtu_to_mss); 17551b63edd6SYuchung Cheng 17565d424d5aSJohn Heffner /* Inverse of above */ 175767469601SEric Dumazet int tcp_mss_to_mtu(struct sock *sk, int mss) 17585d424d5aSJohn Heffner { 1759cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1760cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 17615d424d5aSJohn Heffner 1762e57a3447SYan Zhai return mss + 17635d424d5aSJohn Heffner tp->tcp_header_len + 17645d424d5aSJohn Heffner icsk->icsk_ext_hdr_len + 17655d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 17665d424d5aSJohn Heffner } 1767556c6b46SNeal Cardwell EXPORT_SYMBOL(tcp_mss_to_mtu); 17685d424d5aSJohn Heffner 176967edfef7SAndi Kleen /* MTU probing init per socket */ 17705d424d5aSJohn Heffner void tcp_mtup_init(struct sock *sk) 17715d424d5aSJohn Heffner { 17725d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 17735d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 1774b0f9ca53SFan Du struct net *net = sock_net(sk); 17755d424d5aSJohn Heffner 1776f47d00e0SKuniyuki Iwashima icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; 17775d424d5aSJohn Heffner icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 17785d424d5aSJohn Heffner icsk->icsk_af_ops->net_header_len; 177988d78bc0SKuniyuki Iwashima icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); 17805d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 178105cbc0dbSFan Du if (icsk->icsk_mtup.enabled) 1782c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 17835d424d5aSJohn Heffner } 17844bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_mtup_init); 17855d424d5aSJohn Heffner 
17861da177e4SLinus Torvalds /* This function synchronizes snd mss to current pmtu/exthdr set. 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account 17891da177e4SLinus Torvalds for TCP options, but includes only the bare TCP header. 17901da177e4SLinus Torvalds 17911da177e4SLinus Torvalds tp->rx_opt.mss_clamp is mss negotiated at connection setup. 1792caa20d9aSStephen Hemminger It is the minimum of user_mss and the mss received with SYN. 17931da177e4SLinus Torvalds It also does not include TCP options. 17941da177e4SLinus Torvalds 1795d83d8461SArnaldo Carvalho de Melo inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function. 17961da177e4SLinus Torvalds 17971da177e4SLinus Torvalds tp->mss_cache is the current effective sending mss, including 17981da177e4SLinus Torvalds all tcp options except for SACKs. It is evaluated, 17991da177e4SLinus Torvalds taking into account current pmtu, but never exceeds 18001da177e4SLinus Torvalds tp->rx_opt.mss_clamp. 18011da177e4SLinus Torvalds 18021da177e4SLinus Torvalds NOTE1. rfc1122 clearly states that advertised MSS 18031da177e4SLinus Torvalds DOES NOT include either tcp or ip options. 18041da177e4SLinus Torvalds 1805d83d8461SArnaldo Carvalho de Melo NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1806d83d8461SArnaldo Carvalho de Melo are READ ONLY outside this function. --ANK (980731) 18071da177e4SLinus Torvalds */ 18081da177e4SLinus Torvalds unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 18091da177e4SLinus Torvalds { 18101da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1811d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 18125d424d5aSJohn Heffner int mss_now; 18131da177e4SLinus Torvalds 18145d424d5aSJohn Heffner if (icsk->icsk_mtup.search_high > pmtu) 18155d424d5aSJohn Heffner icsk->icsk_mtup.search_high = pmtu; 18161da177e4SLinus Torvalds 18175d424d5aSJohn Heffner mss_now = tcp_mtu_to_mss(sk, pmtu); 1818409d22b4SIlpo Järvinen mss_now = tcp_bound_to_half_wnd(tp, mss_now); 18191da177e4SLinus Torvalds 18201da177e4SLinus Torvalds /* And store cached results */ 1821d83d8461SArnaldo Carvalho de Melo icsk->icsk_pmtu_cookie = pmtu; 18225d424d5aSJohn Heffner if (icsk->icsk_mtup.enabled) 18235d424d5aSJohn Heffner mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1824c1b4a7e6SDavid S. Miller tp->mss_cache = mss_now; 18251da177e4SLinus Torvalds 18261da177e4SLinus Torvalds return mss_now; 18271da177e4SLinus Torvalds } 18284bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sync_mss); 18291da177e4SLinus Torvalds 18301da177e4SLinus Torvalds /* Compute the current effective MSS, taking SACKs and IP options, 18311da177e4SLinus Torvalds * and even PMTU discovery events into account. 18321da177e4SLinus Torvalds */ 18330c54b85fSIlpo Järvinen unsigned int tcp_current_mss(struct sock *sk) 18341da177e4SLinus Torvalds { 1835cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1836cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 1837c1b4a7e6SDavid S. Miller u32 mss_now; 183895c96174SEric Dumazet unsigned int header_len; 183933ad798cSAdam Langley struct tcp_out_options opts; 18401e03d32bSDmitry Safonov struct tcp_key key; 18411da177e4SLinus Torvalds 1842c1b4a7e6SDavid S. Miller mss_now = tp->mss_cache; 1843c1b4a7e6SDavid S.
Miller 18441da177e4SLinus Torvalds if (dst) { 18451da177e4SLinus Torvalds u32 mtu = dst_mtu(dst); 1846d83d8461SArnaldo Carvalho de Melo if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 18471da177e4SLinus Torvalds mss_now = tcp_sync_mss(sk, mtu); 18481da177e4SLinus Torvalds } 18491e03d32bSDmitry Safonov tcp_get_current_key(sk, &key); 18501e03d32bSDmitry Safonov header_len = tcp_established_options(sk, NULL, &opts, &key) + 185133ad798cSAdam Langley sizeof(struct tcphdr); 185233ad798cSAdam Langley /* The mss_cache is sized based on tp->tcp_header_len, which assumes 185333ad798cSAdam Langley * some common options. If this is an odd packet (because we have SACK 185433ad798cSAdam Langley * blocks etc) then our calculated header_len will be different, and 185533ad798cSAdam Langley * we have to adjust mss_now correspondingly */ 185633ad798cSAdam Langley if (header_len != tp->tcp_header_len) { 185733ad798cSAdam Langley int delta = (int) header_len - tp->tcp_header_len; 185833ad798cSAdam Langley mss_now -= delta; 185933ad798cSAdam Langley } 1860cfb6eeb4SYOSHIFUJI Hideaki 18611da177e4SLinus Torvalds return mss_now; 18621da177e4SLinus Torvalds } 18631da177e4SLinus Torvalds 186486fd14adSWeiping Pan /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 186586fd14adSWeiping Pan * As additional protections, we do not touch cwnd in retransmission phases, 186686fd14adSWeiping Pan * and if application hit its sndbuf limit recently. 186786fd14adSWeiping Pan */ 186886fd14adSWeiping Pan static void tcp_cwnd_application_limited(struct sock *sk) 1869a762a980SDavid S. Miller { 18709e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 1871a762a980SDavid S. Miller 187286fd14adSWeiping Pan if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 187386fd14adSWeiping Pan sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 187486fd14adSWeiping Pan /* Limited by application or receiver window. */ 187586fd14adSWeiping Pan u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 187686fd14adSWeiping Pan u32 win_used = max(tp->snd_cwnd_used, init_win); 187740570375SEric Dumazet if (win_used < tcp_snd_cwnd(tp)) { 187886fd14adSWeiping Pan tp->snd_ssthresh = tcp_current_ssthresh(sk); 187940570375SEric Dumazet tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); 188086fd14adSWeiping Pan } 188186fd14adSWeiping Pan tp->snd_cwnd_used = 0; 188286fd14adSWeiping Pan } 1883c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32; 188486fd14adSWeiping Pan } 188586fd14adSWeiping Pan 1886ca8a2263SNeal Cardwell static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) 1887a762a980SDavid S. Miller { 18881b1fc3fdSWei Wang const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 1889a762a980SDavid S. Miller struct tcp_sock *tp = tcp_sk(sk); 1890a762a980SDavid S. Miller 1891f4ce91ceSNeal Cardwell /* Track the strongest available signal of the degree to which the cwnd 1892f4ce91ceSNeal Cardwell * is fully utilized. If cwnd-limited then remember that fact for the 1893f4ce91ceSNeal Cardwell * current window. If not cwnd-limited then track the maximum number of 1894f4ce91ceSNeal Cardwell * outstanding packets in the current window. (If cwnd-limited then we 1895f4ce91ceSNeal Cardwell * chose to not update tp->max_packets_out to avoid an extra else 1896f4ce91ceSNeal Cardwell * clause with no functional impact.) 
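 * For example, with a cwnd of 10 packets (illustrative numbers): if any
 * transmit in the current window was blocked by cwnd, the window is
 * marked cwnd-limited; if the flow instead peaked at 7 packets in
 * flight, max_packets_out records 7 and the window counts as
 * application- or receive-window-limited.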
1897ca8a2263SNeal Cardwell */ 1898f4ce91ceSNeal Cardwell if (!before(tp->snd_una, tp->cwnd_usage_seq) || 1899f4ce91ceSNeal Cardwell is_cwnd_limited || 1900f4ce91ceSNeal Cardwell (!tp->is_cwnd_limited && 1901f4ce91ceSNeal Cardwell tp->packets_out > tp->max_packets_out)) { 1902ca8a2263SNeal Cardwell tp->is_cwnd_limited = is_cwnd_limited; 1903f4ce91ceSNeal Cardwell tp->max_packets_out = tp->packets_out; 1904f4ce91ceSNeal Cardwell tp->cwnd_usage_seq = tp->snd_nxt; 1905ca8a2263SNeal Cardwell } 1906e114a710SEric Dumazet 190724901551SEric Dumazet if (tcp_is_cwnd_limited(sk)) { 1908a762a980SDavid S. Miller /* Network is fed fully. */ 1909a762a980SDavid S. Miller tp->snd_cwnd_used = 0; 1910c2203cf7SEric Dumazet tp->snd_cwnd_stamp = tcp_jiffies32; 1911a762a980SDavid S. Miller } else { 1912a762a980SDavid S. Miller /* Network starves. */ 1913a762a980SDavid S. Miller if (tp->packets_out > tp->snd_cwnd_used) 1914a762a980SDavid S. Miller tp->snd_cwnd_used = tp->packets_out; 1915a762a980SDavid S. Miller 19164845b571SKuniyuki Iwashima if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && 1917c2203cf7SEric Dumazet (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && 19181b1fc3fdSWei Wang !ca_ops->cong_control) 1919a762a980SDavid S. Miller tcp_cwnd_application_limited(sk); 1920b0f71bd3SFrancis Yan 1921b0f71bd3SFrancis Yan /* The following conditions together indicate the starvation 1922b0f71bd3SFrancis Yan * is caused by insufficient sender buffer: 1923b0f71bd3SFrancis Yan * 1) just sent some data (see tcp_write_xmit) 1924b0f71bd3SFrancis Yan * 2) not cwnd limited (this else condition) 192575c119afSEric Dumazet * 3) no more data to send (tcp_write_queue_empty()) 1926b0f71bd3SFrancis Yan * 4) application is hitting buffer limit (SOCK_NOSPACE) 1927b0f71bd3SFrancis Yan */ 192875c119afSEric Dumazet if (tcp_write_queue_empty(sk) && sk->sk_socket && 1929b0f71bd3SFrancis Yan test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && 1930b0f71bd3SFrancis Yan (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 1931b0f71bd3SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); 1932a762a980SDavid S. Miller } 1933a762a980SDavid S. Miller } 1934a762a980SDavid S. Miller 1935d4589926SEric Dumazet /* Minshall's variant of the Nagle send check.
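 * For example, with snd_una = 100, snd_sml = 150 and snd_nxt = 200
 * (illustrative sequence numbers), the sub-MSS segment ending at 150 has
 * been sent but not yet ACKed, so the check below is true and another
 * small segment must wait.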
*/ 1936d4589926SEric Dumazet static bool tcp_minshall_check(const struct tcp_sock *tp) 1937d4589926SEric Dumazet { 1938d4589926SEric Dumazet return after(tp->snd_sml, tp->snd_una) && 1939d4589926SEric Dumazet !after(tp->snd_sml, tp->snd_nxt); 1940d4589926SEric Dumazet } 1941d4589926SEric Dumazet 1942d4589926SEric Dumazet /* Update snd_sml if this skb is under mss 1943d4589926SEric Dumazet * Note that a TSO packet might end with a sub-mss segment 1944d4589926SEric Dumazet * The test is really: 1945d4589926SEric Dumazet * if ((skb->len % mss) != 0) 1946d4589926SEric Dumazet * tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 1947d4589926SEric Dumazet * But we can avoid doing the divide again given we already have 1948d4589926SEric Dumazet * skb_pcount = skb->len / mss_now 19490e3a4803SIlpo Järvinen */ 1950d4589926SEric Dumazet static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, 1951d4589926SEric Dumazet const struct sk_buff *skb) 1952d4589926SEric Dumazet { 1953d4589926SEric Dumazet if (skb->len < tcp_skb_pcount(skb) * mss_now) 1954d4589926SEric Dumazet tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 1955d4589926SEric Dumazet } 1956d4589926SEric Dumazet 1957d4589926SEric Dumazet /* Return false if the packet can be sent now without violating Nagle's rules: 1958d4589926SEric Dumazet * 1. It is full sized. (provided by caller in %partial bool) 1959d4589926SEric Dumazet * 2. Or it contains FIN. (already checked by caller) 1960d4589926SEric Dumazet * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 1961d4589926SEric Dumazet * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1962d4589926SEric Dumazet * With Minshall's modification: all sent small packets are ACKed. 1963d4589926SEric Dumazet */ 1964d4589926SEric Dumazet static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, 1965cc93fc51SPeter Pan(潘卫平) int nonagle) 1966d4589926SEric Dumazet { 1967d4589926SEric Dumazet return partial && 1968d4589926SEric Dumazet ((nonagle & TCP_NAGLE_CORK) || 1969d4589926SEric Dumazet (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 1970d4589926SEric Dumazet } 1971605ad7f1SEric Dumazet 1972605ad7f1SEric Dumazet /* Return how many segs we'd like on a TSO packet, 197365466904SEric Dumazet * depending on current pacing rate, and how close the peer is. 197465466904SEric Dumazet * 197565466904SEric Dumazet * Rationale is: 197665466904SEric Dumazet * - For close peers, we rather send bigger packets to reduce 197765466904SEric Dumazet * cpu costs, because occasional losses will be repaired fast. 197865466904SEric Dumazet * - For long distance/rtt flows, we would like to get ACK clocking 197965466904SEric Dumazet * with 1 ACK per ms. 198065466904SEric Dumazet * 198165466904SEric Dumazet * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting 198265466904SEric Dumazet * in bigger TSO bursts. We cut the RTT-based allowance in half 198365466904SEric Dumazet * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance 198465466904SEric Dumazet * is below 1500 bytes after 6 * ~500 usec = 3ms.
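 * For example (illustrative numbers): at a pacing rate of 125 MB/s with
 * the usual sk_pacing_shift of 10, the base allowance is
 * 125000000 >> 10 ~= 122 KB, i.e. about 1 ms worth of traffic; the sum is
 * then capped at sk_gso_max_size before being converted into a segment
 * count.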
1985605ad7f1SEric Dumazet */ 1986dcb8c9b4SEric Dumazet static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, 19871b3878caSNeal Cardwell int min_tso_segs) 1988605ad7f1SEric Dumazet { 198965466904SEric Dumazet unsigned long bytes; 199065466904SEric Dumazet u32 r; 1991605ad7f1SEric Dumazet 199228b24f90SEric Dumazet bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); 1993605ad7f1SEric Dumazet 19942455e61bSKuniyuki Iwashima r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); 199565466904SEric Dumazet if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) 199665466904SEric Dumazet bytes += sk->sk_gso_max_size >> r; 1997605ad7f1SEric Dumazet 199865466904SEric Dumazet bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); 199965466904SEric Dumazet 200065466904SEric Dumazet return max_t(u32, bytes / mss_now, min_tso_segs); 2001605ad7f1SEric Dumazet } 2002605ad7f1SEric Dumazet 2003ed6e7268SNeal Cardwell /* Return the number of segments we want in the skb we are transmitting. 2004ed6e7268SNeal Cardwell * See if congestion control module wants to decide; otherwise, autosize. 2005ed6e7268SNeal Cardwell */ 2006ed6e7268SNeal Cardwell static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) 2007ed6e7268SNeal Cardwell { 2008ed6e7268SNeal Cardwell const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 2009dcb8c9b4SEric Dumazet u32 min_tso, tso_segs; 2010ed6e7268SNeal Cardwell 2011dcb8c9b4SEric Dumazet min_tso = ca_ops->min_tso_segs ? 2012dcb8c9b4SEric Dumazet ca_ops->min_tso_segs(sk) : 2013e0bb4ab9SKuniyuki Iwashima READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); 2014dcb8c9b4SEric Dumazet 2015dcb8c9b4SEric Dumazet tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); 2016350c9f48SEric Dumazet return min_t(u32, tso_segs, sk->sk_gso_max_segs); 2017ed6e7268SNeal Cardwell } 2018ed6e7268SNeal Cardwell 2019d4589926SEric Dumazet /* Returns the portion of skb which can be sent right away */ 2020d4589926SEric Dumazet static unsigned int tcp_mss_split_point(const struct sock *sk, 2021d4589926SEric Dumazet const struct sk_buff *skb, 2022d4589926SEric Dumazet unsigned int mss_now, 2023d4589926SEric Dumazet unsigned int max_segs, 2024d4589926SEric Dumazet int nonagle) 2025c1b4a7e6SDavid S. Miller { 2026cf533ea5SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 2027d4589926SEric Dumazet u32 partial, needed, window, max_len; 2028c1b4a7e6SDavid S. Miller 202990840defSIlpo Järvinen window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 20301485348dSBen Hutchings max_len = mss_now * max_segs; 20310e3a4803SIlpo Järvinen 20321485348dSBen Hutchings if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 20331485348dSBen Hutchings return max_len; 20340e3a4803SIlpo Järvinen 20355ea3a748SIlpo Järvinen needed = min(skb->len, window); 20365ea3a748SIlpo Järvinen 20371485348dSBen Hutchings if (max_len <= needed) 20381485348dSBen Hutchings return max_len; 20390e3a4803SIlpo Järvinen 2040d4589926SEric Dumazet partial = needed % mss_now; 2041d4589926SEric Dumazet /* If last segment is not a full MSS, check if Nagle rules allow us 2042d4589926SEric Dumazet * to include this last segment in this skb. 2043d4589926SEric Dumazet * Otherwise, we'll split the skb at last MSS boundary 2044d4589926SEric Dumazet */ 2045cc93fc51SPeter Pan(潘卫平) if (tcp_nagle_check(partial != 0, tp, nonagle)) 2046d4589926SEric Dumazet return needed - partial; 2047d4589926SEric Dumazet 2048d4589926SEric Dumazet return needed; 2049c1b4a7e6SDavid S. 
Miller } 2050c1b4a7e6SDavid S. Miller 2051c1b4a7e6SDavid S. Miller /* Can at least one segment of SKB be sent right now, according to the 2052c1b4a7e6SDavid S. Miller * congestion window rules? If so, return how many segments are allowed. 2053c1b4a7e6SDavid S. Miller */ 2054cf533ea5SEric Dumazet static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 2055cf533ea5SEric Dumazet const struct sk_buff *skb) 2056c1b4a7e6SDavid S. Miller { 2057d649a7a8SEric Dumazet u32 in_flight, cwnd, halfcwnd; 2058c1b4a7e6SDavid S. Miller 2059c1b4a7e6SDavid S. Miller /* Don't be strict about the congestion window for the final FIN. */ 20604de075e0SEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && 20614de075e0SEric Dumazet tcp_skb_pcount(skb) == 1) 2062c1b4a7e6SDavid S. Miller return 1; 2063c1b4a7e6SDavid S. Miller 2064c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 206540570375SEric Dumazet cwnd = tcp_snd_cwnd(tp); 2066d649a7a8SEric Dumazet if (in_flight >= cwnd) 2067c1b4a7e6SDavid S. Miller return 0; 2068d649a7a8SEric Dumazet 2069d649a7a8SEric Dumazet /* For better scheduling, ensure we have at least 2070d649a7a8SEric Dumazet * 2 GSO packets in flight. 2071d649a7a8SEric Dumazet */ 2072d649a7a8SEric Dumazet halfcwnd = max(cwnd >> 1, 1U); 2073d649a7a8SEric Dumazet return min(halfcwnd, cwnd - in_flight); 2074c1b4a7e6SDavid S. Miller } 2075c1b4a7e6SDavid S. Miller 2076b595076aSUwe Kleine-König /* Initialize TSO state of a skb. 207767edfef7SAndi Kleen * This must be invoked the first time we consider transmitting 2078c1b4a7e6SDavid S. Miller * SKB onto the wire. 2079c1b4a7e6SDavid S. Miller */ 20805bbb432cSEric Dumazet static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) 2081c1b4a7e6SDavid S. Miller { 2082c1b4a7e6SDavid S. Miller int tso_segs = tcp_skb_pcount(skb); 2083c1b4a7e6SDavid S. Miller 2084f8269a49SIlpo Järvinen if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 20855bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 2086c1b4a7e6SDavid S. Miller tso_segs = tcp_skb_pcount(skb); 2087c1b4a7e6SDavid S. Miller } 2088c1b4a7e6SDavid S. Miller return tso_segs; 2089c1b4a7e6SDavid S. Miller } 2090c1b4a7e6SDavid S. Miller 2091c1b4a7e6SDavid S. Miller 2092a2a385d6SEric Dumazet /* Return true if the Nagle test allows this packet to be 2093c1b4a7e6SDavid S. Miller * sent now. 2094c1b4a7e6SDavid S. Miller */ 2095a2a385d6SEric Dumazet static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 2096c1b4a7e6SDavid S. Miller unsigned int cur_mss, int nonagle) 2097c1b4a7e6SDavid S. Miller { 2098c1b4a7e6SDavid S. Miller /* The Nagle rule does not apply to frames that sit in the middle of the 2099c1b4a7e6SDavid S. Miller * write_queue (they have no chance to get new data). 2100c1b4a7e6SDavid S. Miller * 2101c1b4a7e6SDavid S. Miller * This is implemented in the callers, where they modify the 'nonagle' 2102c1b4a7e6SDavid S. Miller * argument based upon the location of SKB in the send queue. 2103c1b4a7e6SDavid S. Miller */ 2104c1b4a7e6SDavid S. Miller if (nonagle & TCP_NAGLE_PUSH) 2105a2a385d6SEric Dumazet return true; 2106c1b4a7e6SDavid S. Miller 21079b44190dSYuchung Cheng /* Don't use the nagle rule for urgent data (or for the final FIN). */ 21089b44190dSYuchung Cheng if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 2109a2a385d6SEric Dumazet return true; 2110c1b4a7e6SDavid S.
Miller 2111cc93fc51SPeter Pan(潘卫平) if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) 2112a2a385d6SEric Dumazet return true; 2113c1b4a7e6SDavid S. Miller 2114a2a385d6SEric Dumazet return false; 2115c1b4a7e6SDavid S. Miller } 2116c1b4a7e6SDavid S. Miller 2117c1b4a7e6SDavid S. Miller /* Does at least the first segment of SKB fit into the send window? */ 2118a2a385d6SEric Dumazet static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 2119a2a385d6SEric Dumazet const struct sk_buff *skb, 2120056834d9SIlpo Järvinen unsigned int cur_mss) 2121c1b4a7e6SDavid S. Miller { 2122c1b4a7e6SDavid S. Miller u32 end_seq = TCP_SKB_CB(skb)->end_seq; 2123c1b4a7e6SDavid S. Miller 2124c1b4a7e6SDavid S. Miller if (skb->len > cur_mss) 2125c1b4a7e6SDavid S. Miller end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 2126c1b4a7e6SDavid S. Miller 212790840defSIlpo Järvinen return !after(end_seq, tcp_wnd_end(tp)); 2128c1b4a7e6SDavid S. Miller } 2129c1b4a7e6SDavid S. Miller 2130c1b4a7e6SDavid S. Miller /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 2131c1b4a7e6SDavid S. Miller * which is put after SKB on the list. It is very much like 2132c1b4a7e6SDavid S. Miller * tcp_fragment() except that it may make several kinds of assumptions 2133c1b4a7e6SDavid S. Miller * in order to speed up the splitting operation. In particular, we 2134c1b4a7e6SDavid S. Miller * know that all the data is in scatter-gather pages, and that the 2135c1b4a7e6SDavid S. Miller * packet has never been sent out before (and thus is not cloned). 2136c1b4a7e6SDavid S. Miller */ 213756483341SEric Dumazet static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 2138c4ead4c5SEric Dumazet unsigned int mss_now, gfp_t gfp) 2139c1b4a7e6SDavid S. Miller { 2140c1b4a7e6SDavid S. Miller int nlen = skb->len - len; 214156483341SEric Dumazet struct sk_buff *buff; 21429ce01461SIlpo Järvinen u8 flags; 2143c1b4a7e6SDavid S. Miller 2144c1b4a7e6SDavid S. Miller /* All of a TSO frame must be composed of paged data. */ 2145b4a24397SEric Dumazet DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); 2146c1b4a7e6SDavid S. Miller 21475882efffSEric Dumazet buff = tcp_stream_alloc_skb(sk, gfp, true); 214851456b29SIan Morris if (unlikely(!buff)) 2149c1b4a7e6SDavid S. Miller return -ENOMEM; 215041477662SJakub Kicinski skb_copy_decrypted(buff, skb); 21515a369ca6SPaolo Abeni mptcp_skb_ext_copy(buff, skb); 2152c1b4a7e6SDavid S. Miller 2153ab4e846aSEric Dumazet sk_wmem_queued_add(sk, buff->truesize); 21543ab224beSHideo Aoki sk_mem_charge(sk, buff->truesize); 2155b60b49eaSHerbert Xu buff->truesize += nlen; 2156c1b4a7e6SDavid S. Miller skb->truesize -= nlen; 2157c1b4a7e6SDavid S. Miller 2158c1b4a7e6SDavid S. Miller /* Correct the sequence numbers. */ 2159c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 2160c1b4a7e6SDavid S. Miller TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 2161c1b4a7e6SDavid S. Miller TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 2162c1b4a7e6SDavid S. Miller 2163c1b4a7e6SDavid S. Miller /* PSH and FIN should only be set in the second packet. */ 21644de075e0SEric Dumazet flags = TCP_SKB_CB(skb)->tcp_flags; 21654de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 21664de075e0SEric Dumazet TCP_SKB_CB(buff)->tcp_flags = flags; 2167c1b4a7e6SDavid S. Miller 2168a166140eSMartin KaFai Lau tcp_skb_fragment_eor(skb, buff); 2169a166140eSMartin KaFai Lau 2170c1b4a7e6SDavid S. 
Miller skb_split(skb, buff, len); 2171490cc7d0SWillem de Bruijn tcp_fragment_tstamp(skb, buff); 2172c1b4a7e6SDavid S. Miller 2173c1b4a7e6SDavid S. Miller /* Fix up tso_factor for both original and new SKB. */ 21745bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 21755bbb432cSEric Dumazet tcp_set_skb_tso_segs(buff, mss_now); 2176c1b4a7e6SDavid S. Miller 2177c1b4a7e6SDavid S. Miller /* Link BUFF into the send queue. */ 2178f4a775d1SEric Dumazet __skb_header_release(buff); 217956483341SEric Dumazet tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); 2180c1b4a7e6SDavid S. Miller 2181c1b4a7e6SDavid S. Miller return 0; 2182c1b4a7e6SDavid S. Miller } 2183c1b4a7e6SDavid S. Miller 2184c1b4a7e6SDavid S. Miller /* Try to defer sending, if possible, in order to minimize the amount 2185c1b4a7e6SDavid S. Miller * of TSO splitting we do. View it as a kind of TSO Nagle test. 2186c1b4a7e6SDavid S. Miller * 2187c1b4a7e6SDavid S. Miller * This algorithm is from John Heffner. 2188c1b4a7e6SDavid S. Miller */ 2189ca8a2263SNeal Cardwell static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, 2190f9bfe4e6SEric Dumazet bool *is_cwnd_limited, 2191f9bfe4e6SEric Dumazet bool *is_rwnd_limited, 2192f9bfe4e6SEric Dumazet u32 max_segs) 2193c1b4a7e6SDavid S. Miller { 21946687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 2195f1c6ea38SEric Dumazet u32 send_win, cong_win, limit, in_flight; 219650c8339eSEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 219750c8339eSEric Dumazet struct sk_buff *head; 2198ad9f4f50SEric Dumazet int win_divisor; 2199f1c6ea38SEric Dumazet s64 delta; 2200c1b4a7e6SDavid S. Miller 220199d7662aSEric Dumazet if (icsk->icsk_ca_state >= TCP_CA_Recovery) 2202ae8064acSJohn Heffner goto send_now; 2203ae8064acSJohn Heffner 22045f852eb5SEric Dumazet /* Avoid bursty behavior by allowing defer 2205a682850aSEric Dumazet * only if the last write was recent (1 ms). 2206a682850aSEric Dumazet * Note that tp->tcp_wstamp_ns can be in the future if we have 2207a682850aSEric Dumazet * packets waiting in a qdisc or device for EDT delivery. 22085f852eb5SEric Dumazet */ 2209a682850aSEric Dumazet delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; 2210a682850aSEric Dumazet if (delta > 0) 2211ae8064acSJohn Heffner goto send_now; 2212908a75c1SDavid S. Miller 2213c1b4a7e6SDavid S. Miller in_flight = tcp_packets_in_flight(tp); 2214c1b4a7e6SDavid S. Miller 2215c8c9aeb5SStefano Brivio BUG_ON(tcp_skb_pcount(skb) <= 1); 221640570375SEric Dumazet BUG_ON(tcp_snd_cwnd(tp) <= in_flight); 2217c1b4a7e6SDavid S. Miller 221890840defSIlpo Järvinen send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2219c1b4a7e6SDavid S. Miller 2220c1b4a7e6SDavid S. Miller /* From in_flight test above, we know that cwnd > in_flight. */ 222140570375SEric Dumazet cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; 2222c1b4a7e6SDavid S. Miller 2223c1b4a7e6SDavid S. Miller limit = min(send_win, cong_win); 2224c1b4a7e6SDavid S. Miller 2225ba244fe9SDavid S. Miller /* If a full-sized TSO skb can be sent, do it. */ 2226605ad7f1SEric Dumazet if (limit >= max_segs * tp->mss_cache) 2227ae8064acSJohn Heffner goto send_now; 2228ba244fe9SDavid S. Miller 222962ad2761SIlpo Järvinen /* Middle in queue won't get any more data, full sendable already? 
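 * (an skb that is not the tail of the write queue can never receive
 * more data from the application, so once limit covers its whole
 * length, deferring gains nothing)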
*/ 223062ad2761SIlpo Järvinen if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 223162ad2761SIlpo Järvinen goto send_now; 223262ad2761SIlpo Järvinen 22335bbcc0f5SLinus Torvalds win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); 2234ad9f4f50SEric Dumazet if (win_divisor) { 223540570375SEric Dumazet u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); 2236c1b4a7e6SDavid S. Miller 2237c1b4a7e6SDavid S. Miller /* If at least some fraction of a window is available, 2238c1b4a7e6SDavid S. Miller * just use it. 2239c1b4a7e6SDavid S. Miller */ 2240ad9f4f50SEric Dumazet chunk /= win_divisor; 2241c1b4a7e6SDavid S. Miller if (limit >= chunk) 2242ae8064acSJohn Heffner goto send_now; 2243c1b4a7e6SDavid S. Miller } else { 2244c1b4a7e6SDavid S. Miller /* Different approach, try not to defer past a single 2245c1b4a7e6SDavid S. Miller * ACK. Receiver should ACK every other full sized 2246c1b4a7e6SDavid S. Miller * frame, so if we have space for more than 3 frames 2247c1b4a7e6SDavid S. Miller * then send now. 2248c1b4a7e6SDavid S. Miller */ 22496b5a5c0dSNeal Cardwell if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 2250ae8064acSJohn Heffner goto send_now; 2251c1b4a7e6SDavid S. Miller } 2252c1b4a7e6SDavid S. Miller 225375c119afSEric Dumazet /* TODO : use tsorted_sent_queue ? */ 225475c119afSEric Dumazet head = tcp_rtx_queue_head(sk); 225575c119afSEric Dumazet if (!head) 225675c119afSEric Dumazet goto send_now; 2257f1c6ea38SEric Dumazet delta = tp->tcp_clock_cache - head->tstamp; 225850c8339eSEric Dumazet /* If next ACK is likely to come too late (half srtt), do not defer */ 2259f1c6ea38SEric Dumazet if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) 226050c8339eSEric Dumazet goto send_now; 226150c8339eSEric Dumazet 2262f9bfe4e6SEric Dumazet /* Ok, it looks like it is advisable to defer. 2263f9bfe4e6SEric Dumazet * Three cases are tracked : 2264f9bfe4e6SEric Dumazet * 1) We are cwnd-limited 2265f9bfe4e6SEric Dumazet * 2) We are rwnd-limited 2266f9bfe4e6SEric Dumazet * 3) We are application limited. 2267f9bfe4e6SEric Dumazet */ 2268f9bfe4e6SEric Dumazet if (cong_win < send_win) { 2269f9bfe4e6SEric Dumazet if (cong_win <= skb->len) { 2270ca8a2263SNeal Cardwell *is_cwnd_limited = true; 2271f9bfe4e6SEric Dumazet return true; 2272f9bfe4e6SEric Dumazet } 2273f9bfe4e6SEric Dumazet } else { 2274f9bfe4e6SEric Dumazet if (send_win <= skb->len) { 2275f9bfe4e6SEric Dumazet *is_rwnd_limited = true; 2276f9bfe4e6SEric Dumazet return true; 2277f9bfe4e6SEric Dumazet } 2278f9bfe4e6SEric Dumazet } 2279f9bfe4e6SEric Dumazet 2280f9bfe4e6SEric Dumazet /* If this packet won't get more data, do not wait. */ 2281d8ed257fSEric Dumazet if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || 2282d8ed257fSEric Dumazet TCP_SKB_CB(skb)->eor) 2283f9bfe4e6SEric Dumazet goto send_now; 2284ca8a2263SNeal Cardwell 2285a2a385d6SEric Dumazet return true; 2286ae8064acSJohn Heffner 2287ae8064acSJohn Heffner send_now: 2288a2a385d6SEric Dumazet return false; 2289c1b4a7e6SDavid S. Miller } 2290c1b4a7e6SDavid S. 
Miller 229105cbc0dbSFan Du static inline void tcp_mtu_check_reprobe(struct sock *sk) 229205cbc0dbSFan Du { 229305cbc0dbSFan Du struct inet_connection_sock *icsk = inet_csk(sk); 229405cbc0dbSFan Du struct tcp_sock *tp = tcp_sk(sk); 229505cbc0dbSFan Du struct net *net = sock_net(sk); 229605cbc0dbSFan Du u32 interval; 229705cbc0dbSFan Du s32 delta; 229805cbc0dbSFan Du 22992a85388fSKuniyuki Iwashima interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); 2300c74df29aSEric Dumazet delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; 230105cbc0dbSFan Du if (unlikely(delta >= interval * HZ)) { 230205cbc0dbSFan Du int mss = tcp_current_mss(sk); 230305cbc0dbSFan Du 230405cbc0dbSFan Du /* Update current search range */ 230505cbc0dbSFan Du icsk->icsk_mtup.probe_size = 0; 230605cbc0dbSFan Du icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + 230705cbc0dbSFan Du sizeof(struct tcphdr) + 230805cbc0dbSFan Du icsk->icsk_af_ops->net_header_len; 230905cbc0dbSFan Du icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 231005cbc0dbSFan Du 231105cbc0dbSFan Du /* Update probe time stamp */ 2312c74df29aSEric Dumazet icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 231305cbc0dbSFan Du } 231405cbc0dbSFan Du } 231505cbc0dbSFan Du 2316808cf9e3SIlya Lesokhin static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) 2317808cf9e3SIlya Lesokhin { 2318808cf9e3SIlya Lesokhin struct sk_buff *skb, *next; 2319808cf9e3SIlya Lesokhin 2320808cf9e3SIlya Lesokhin skb = tcp_send_head(sk); 2321808cf9e3SIlya Lesokhin tcp_for_write_queue_from_safe(skb, next, sk) { 2322808cf9e3SIlya Lesokhin if (len <= skb->len) 2323808cf9e3SIlya Lesokhin break; 2324808cf9e3SIlya Lesokhin 23259b65b17dSTalal Ahmad if (unlikely(TCP_SKB_CB(skb)->eor) || 23269b65b17dSTalal Ahmad tcp_has_tx_tstamp(skb) || 23279b65b17dSTalal Ahmad !skb_pure_zcopy_same(skb, next)) 2328808cf9e3SIlya Lesokhin return false; 2329808cf9e3SIlya Lesokhin 2330808cf9e3SIlya Lesokhin len -= skb->len; 2331808cf9e3SIlya Lesokhin } 2332808cf9e3SIlya Lesokhin 2333808cf9e3SIlya Lesokhin return true; 2334808cf9e3SIlya Lesokhin } 2335808cf9e3SIlya Lesokhin 233673601329SEric Dumazet static int tcp_clone_payload(struct sock *sk, struct sk_buff *to, 233773601329SEric Dumazet int probe_size) 233873601329SEric Dumazet { 233973601329SEric Dumazet skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; 234073601329SEric Dumazet int i, todo, len = 0, nr_frags = 0; 234173601329SEric Dumazet const struct sk_buff *skb; 234273601329SEric Dumazet 234373601329SEric Dumazet if (!sk_wmem_schedule(sk, to->truesize + probe_size)) 234473601329SEric Dumazet return -ENOMEM; 234573601329SEric Dumazet 234673601329SEric Dumazet skb_queue_walk(&sk->sk_write_queue, skb) { 234773601329SEric Dumazet const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; 234873601329SEric Dumazet 234973601329SEric Dumazet if (skb_headlen(skb)) 235073601329SEric Dumazet return -EINVAL; 235173601329SEric Dumazet 235273601329SEric Dumazet for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { 235373601329SEric Dumazet if (len >= probe_size) 235473601329SEric Dumazet goto commit; 235573601329SEric Dumazet todo = min_t(int, skb_frag_size(fragfrom), 235673601329SEric Dumazet probe_size - len); 235773601329SEric Dumazet len += todo; 235873601329SEric Dumazet if (lastfrag && 235973601329SEric Dumazet skb_frag_page(fragfrom) == skb_frag_page(lastfrag) && 236073601329SEric Dumazet skb_frag_off(fragfrom) == skb_frag_off(lastfrag) + 236173601329SEric Dumazet skb_frag_size(lastfrag)) { 236273601329SEric Dumazet 
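/* This chunk is contiguous with the previous fragment (same page,
 * adjoining offset): extend the previous frag in place rather than
 * consuming a new slot in the destination skb.
 */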
skb_frag_size_add(lastfrag, todo);
236373601329SEric Dumazet continue;
236473601329SEric Dumazet }
236573601329SEric Dumazet if (unlikely(nr_frags == MAX_SKB_FRAGS))
236673601329SEric Dumazet return -E2BIG;
236773601329SEric Dumazet skb_frag_page_copy(fragto, fragfrom);
236873601329SEric Dumazet skb_frag_off_copy(fragto, fragfrom);
236973601329SEric Dumazet skb_frag_size_set(fragto, todo);
237073601329SEric Dumazet nr_frags++;
237173601329SEric Dumazet lastfrag = fragto++;
237273601329SEric Dumazet }
237373601329SEric Dumazet }
237473601329SEric Dumazet commit:
237573601329SEric Dumazet WARN_ON_ONCE(len != probe_size);
237673601329SEric Dumazet for (i = 0; i < nr_frags; i++)
237773601329SEric Dumazet skb_frag_ref(to, i);
237873601329SEric Dumazet
237973601329SEric Dumazet skb_shinfo(to)->nr_frags = nr_frags;
238073601329SEric Dumazet to->truesize += probe_size;
238173601329SEric Dumazet to->len += probe_size;
238273601329SEric Dumazet to->data_len += probe_size;
238373601329SEric Dumazet __skb_header_release(to);
238473601329SEric Dumazet return 0;
238573601329SEric Dumazet }
238673601329SEric Dumazet
23875d424d5aSJohn Heffner /* Create a new MTU probe if we are ready.
238867edfef7SAndi Kleen * MTU probe is regularly attempting to increase the path MTU by
238967edfef7SAndi Kleen * deliberately sending larger packets. This discovers routing
239067edfef7SAndi Kleen * changes resulting in larger path MTUs.
239167edfef7SAndi Kleen *
23925d424d5aSJohn Heffner * Returns 0 if we should wait to probe (no cwnd available),
23935d424d5aSJohn Heffner * 1 if a probe was sent,
2394056834d9SIlpo Järvinen * -1 otherwise
2395056834d9SIlpo Järvinen */
23965d424d5aSJohn Heffner static int tcp_mtu_probe(struct sock *sk)
23975d424d5aSJohn Heffner {
23985d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk);
239912a59abcSEric Dumazet struct tcp_sock *tp = tcp_sk(sk);
24005d424d5aSJohn Heffner struct sk_buff *skb, *nskb, *next;
24016b58e0a5SFan Du struct net *net = sock_net(sk);
24025d424d5aSJohn Heffner int probe_size;
240391cc17c0SIlpo Järvinen int size_needed;
240412a59abcSEric Dumazet int copy, len;
24055d424d5aSJohn Heffner int mss_now;
24066b58e0a5SFan Du int interval;
24075d424d5aSJohn Heffner
24085d424d5aSJohn Heffner /* Not currently probing/verifying,
24095d424d5aSJohn Heffner * not in recovery,
24105d424d5aSJohn Heffner * have enough cwnd, and
241112a59abcSEric Dumazet * not SACKing (the variable headers throw things off)
241212a59abcSEric Dumazet */
241312a59abcSEric Dumazet if (likely(!icsk->icsk_mtup.enabled ||
24145d424d5aSJohn Heffner icsk->icsk_mtup.probe_size ||
24155d424d5aSJohn Heffner inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
241640570375SEric Dumazet tcp_snd_cwnd(tp) < 11 ||
241712a59abcSEric Dumazet tp->rx_opt.num_sacks || tp->rx_opt.dsack))
24185d424d5aSJohn Heffner return -1;
24195d424d5aSJohn Heffner
24206b58e0a5SFan Du /* Use binary search for probe_size, between tcp_base_mss
24216b58e0a5SFan Du * and the current mss_clamp. If (search_high - search_low) is
24226b58e0a5SFan Du * smaller than a threshold, back off from probing.
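 * Illustrative example (numbers assumed, not from this code): with
 * search_low = 1024 and search_high = 1500, the probe aims at the MSS
 * for an MTU of (1024 + 1500) / 2 = 1262.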
24236b58e0a5SFan Du */
24240c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk);
24256b58e0a5SFan Du probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
24266b58e0a5SFan Du icsk->icsk_mtup.search_low) >> 1);
242791cc17c0SIlpo Järvinen size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
24286b58e0a5SFan Du interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
242905cbc0dbSFan Du /* When misfortune happens (we are actively reprobing and the
243005cbc0dbSFan Du * reprobe timer has expired), we stick with the current probing
243105cbc0dbSFan Du * process by not resetting the search range to its original value.
243205cbc0dbSFan Du */
24336b58e0a5SFan Du if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
243492c0aa41SKuniyuki Iwashima interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
243505cbc0dbSFan Du /* Check whether enough time has elapsed for
243605cbc0dbSFan Du * another round of probing.
243705cbc0dbSFan Du */
243805cbc0dbSFan Du tcp_mtu_check_reprobe(sk);
24395d424d5aSJohn Heffner return -1;
24405d424d5aSJohn Heffner }
24415d424d5aSJohn Heffner
24425d424d5aSJohn Heffner /* Have enough data in the send queue to probe? */
24437f9c33e5SIlpo Järvinen if (tp->write_seq - tp->snd_nxt < size_needed)
24445d424d5aSJohn Heffner return -1;
24455d424d5aSJohn Heffner
244691cc17c0SIlpo Järvinen if (tp->snd_wnd < size_needed)
24475d424d5aSJohn Heffner return -1;
244890840defSIlpo Järvinen if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
24495d424d5aSJohn Heffner return 0;
24505d424d5aSJohn Heffner
2451d67c58e9SIlpo Järvinen /* Do we need to wait to drain cwnd? With none in flight, don't stall */
245240570375SEric Dumazet if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2453d67c58e9SIlpo Järvinen if (!tcp_packets_in_flight(tp))
24545d424d5aSJohn Heffner return -1;
24555d424d5aSJohn Heffner else
24565d424d5aSJohn Heffner return 0;
24575d424d5aSJohn Heffner }
24585d424d5aSJohn Heffner
2459808cf9e3SIlya Lesokhin if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2460808cf9e3SIlya Lesokhin return -1;
2461808cf9e3SIlya Lesokhin
24625d424d5aSJohn Heffner /* We're allowed to probe. Build it now. */
24635882efffSEric Dumazet nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
246451456b29SIan Morris if (!nskb)
24655d424d5aSJohn Heffner return -1;
246673601329SEric Dumazet
246773601329SEric Dumazet /* build the payload, and be prepared to abort if this fails. */
246873601329SEric Dumazet if (tcp_clone_payload(sk, nskb, probe_size)) {
246971c299c7SJakub Kicinski tcp_skb_tsorted_anchor_cleanup(nskb);
247073601329SEric Dumazet consume_skb(nskb);
247173601329SEric Dumazet return -1;
247273601329SEric Dumazet }
2473ab4e846aSEric Dumazet sk_wmem_queued_add(sk, nskb->truesize);
24743ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize);
24755d424d5aSJohn Heffner
2476fe067e8aSDavid S.
Miller skb = tcp_send_head(sk); 247741477662SJakub Kicinski skb_copy_decrypted(nskb, skb); 24785a369ca6SPaolo Abeni mptcp_skb_ext_copy(nskb, skb); 24795d424d5aSJohn Heffner 24805d424d5aSJohn Heffner TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 24815d424d5aSJohn Heffner TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 24824de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 24835d424d5aSJohn Heffner 248450c4817eSIlpo Järvinen tcp_insert_write_queue_before(nskb, skb, sk); 24852b7cda9cSEric Dumazet tcp_highest_sack_replace(sk, skb, nskb); 248650c4817eSIlpo Järvinen 24875d424d5aSJohn Heffner len = 0; 2488234b6860SIlpo Järvinen tcp_for_write_queue_from_safe(skb, next, sk) { 24895d424d5aSJohn Heffner copy = min_t(int, skb->len, probe_size - len); 24905d424d5aSJohn Heffner 24915d424d5aSJohn Heffner if (skb->len <= copy) { 24925d424d5aSJohn Heffner /* We've eaten all the data from this skb. 24935d424d5aSJohn Heffner * Throw it away. */ 24944de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2495808cf9e3SIlya Lesokhin /* If this is the last SKB we copy and eor is set 2496808cf9e3SIlya Lesokhin * we need to propagate it to the new skb. 2497808cf9e3SIlya Lesokhin */ 2498808cf9e3SIlya Lesokhin TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; 2499888a5c53SWillem de Bruijn tcp_skb_collapse_tstamp(nskb, skb); 2500fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 250103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 25025d424d5aSJohn Heffner } else { 25034de075e0SEric Dumazet TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 2504a3433f35SChangli Gao ~(TCPHDR_FIN|TCPHDR_PSH); 25055d424d5aSJohn Heffner __pskb_trim_head(skb, copy); 25065bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss_now); 25075d424d5aSJohn Heffner TCP_SKB_CB(skb)->seq += copy; 25085d424d5aSJohn Heffner } 25095d424d5aSJohn Heffner 25105d424d5aSJohn Heffner len += copy; 2511234b6860SIlpo Järvinen 2512234b6860SIlpo Järvinen if (len >= probe_size) 2513234b6860SIlpo Järvinen break; 25145d424d5aSJohn Heffner } 25155bbb432cSEric Dumazet tcp_init_tso_segs(nskb, nskb->len); 25165d424d5aSJohn Heffner 25175d424d5aSJohn Heffner /* We're ready to send. If this fails, the probe will 25187faee5c0SEric Dumazet * be resegmented into mss-sized pieces by tcp_write_xmit(). 25197faee5c0SEric Dumazet */ 25205d424d5aSJohn Heffner if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 25215d424d5aSJohn Heffner /* Decrement cwnd here because we are sending 25225d424d5aSJohn Heffner * effectively two packets. 
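 * (the single probe skb carries more than one MSS worth of payload but
 * is counted as just one packet in packets_out; shrinking snd_cwnd by
 * one charges for the extra in-flight data)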
*/ 252340570375SEric Dumazet tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); 252466f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, nskb); 25255d424d5aSJohn Heffner 25265d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 25270e7b1368SJohn Heffner tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 25280e7b1368SJohn Heffner tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 25295d424d5aSJohn Heffner 25305d424d5aSJohn Heffner return 1; 25315d424d5aSJohn Heffner } 25325d424d5aSJohn Heffner 25335d424d5aSJohn Heffner return -1; 25345d424d5aSJohn Heffner } 25355d424d5aSJohn Heffner 2536864e5c09SEric Dumazet static bool tcp_pacing_check(struct sock *sk) 2537218af599SEric Dumazet { 2538864e5c09SEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 2539864e5c09SEric Dumazet 2540864e5c09SEric Dumazet if (!tcp_needs_internal_pacing(sk)) 2541864e5c09SEric Dumazet return false; 2542864e5c09SEric Dumazet 2543864e5c09SEric Dumazet if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) 2544864e5c09SEric Dumazet return false; 2545864e5c09SEric Dumazet 2546864e5c09SEric Dumazet if (!hrtimer_is_queued(&tp->pacing_timer)) { 2547864e5c09SEric Dumazet hrtimer_start(&tp->pacing_timer, 2548864e5c09SEric Dumazet ns_to_ktime(tp->tcp_wstamp_ns), 2549864e5c09SEric Dumazet HRTIMER_MODE_ABS_PINNED_SOFT); 2550864e5c09SEric Dumazet sock_hold(sk); 2551864e5c09SEric Dumazet } 2552864e5c09SEric Dumazet return true; 2553218af599SEric Dumazet } 2554218af599SEric Dumazet 2555f921a4a5SEric Dumazet static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk) 2556f921a4a5SEric Dumazet { 2557f921a4a5SEric Dumazet const struct rb_node *node = sk->tcp_rtx_queue.rb_node; 2558f921a4a5SEric Dumazet 2559f921a4a5SEric Dumazet /* No skb in the rtx queue. */ 2560f921a4a5SEric Dumazet if (!node) 2561f921a4a5SEric Dumazet return true; 2562f921a4a5SEric Dumazet 2563f921a4a5SEric Dumazet /* Only one skb in rtx queue. */ 2564f921a4a5SEric Dumazet return !node->rb_left && !node->rb_right; 2565f921a4a5SEric Dumazet } 2566f921a4a5SEric Dumazet 2567f9616c35SEric Dumazet /* TCP Small Queues : 2568f9616c35SEric Dumazet * Control number of packets in qdisc/devices to two packets / or ~1 ms. 2569f9616c35SEric Dumazet * (These limits are doubled for retransmits) 2570f9616c35SEric Dumazet * This allows for : 2571f9616c35SEric Dumazet * - better RTT estimation and ACK scheduling 2572f9616c35SEric Dumazet * - faster recovery 2573f9616c35SEric Dumazet * - high rates 2574f9616c35SEric Dumazet * Alas, some drivers / subsystems require a fair amount 2575f9616c35SEric Dumazet * of queued bytes to ensure line rate. 
2576f9616c35SEric Dumazet * One example is wifi aggregation (802.11 AMPDU) 2577f9616c35SEric Dumazet */ 2578f9616c35SEric Dumazet static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, 2579f9616c35SEric Dumazet unsigned int factor) 2580f9616c35SEric Dumazet { 258176a9ebe8SEric Dumazet unsigned long limit; 2582f9616c35SEric Dumazet 258376a9ebe8SEric Dumazet limit = max_t(unsigned long, 258476a9ebe8SEric Dumazet 2 * skb->truesize, 258528b24f90SEric Dumazet READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); 2586c73e5807SEric Dumazet if (sk->sk_pacing_status == SK_PACING_NONE) 258776a9ebe8SEric Dumazet limit = min_t(unsigned long, limit, 25889fb90193SKuniyuki Iwashima READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); 2589f9616c35SEric Dumazet limit <<= factor; 2590f9616c35SEric Dumazet 2591a842fe14SEric Dumazet if (static_branch_unlikely(&tcp_tx_delay_enabled) && 2592a842fe14SEric Dumazet tcp_sk(sk)->tcp_tx_delay) { 259328b24f90SEric Dumazet u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * 259428b24f90SEric Dumazet tcp_sk(sk)->tcp_tx_delay; 2595a842fe14SEric Dumazet 2596a842fe14SEric Dumazet /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we 2597a842fe14SEric Dumazet * approximate our needs assuming an ~100% skb->truesize overhead. 2598a842fe14SEric Dumazet * USEC_PER_SEC is approximated by 2^20. 2599a842fe14SEric Dumazet * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. 2600a842fe14SEric Dumazet */ 2601a842fe14SEric Dumazet extra_bytes >>= (20 - 1); 2602a842fe14SEric Dumazet limit += extra_bytes; 2603a842fe14SEric Dumazet } 260414afee4bSReshetova, Elena if (refcount_read(&sk->sk_wmem_alloc) > limit) { 2605f921a4a5SEric Dumazet /* Always send skb if rtx queue is empty or has one skb. 260675eefc6cSEric Dumazet * No need to wait for TX completion to call us back, 260775eefc6cSEric Dumazet * after softirq/tasklet schedule. 260875eefc6cSEric Dumazet * This helps when TX completions are delayed too much. 260975eefc6cSEric Dumazet */ 2610f921a4a5SEric Dumazet if (tcp_rtx_queue_empty_or_single_skb(sk)) 261175eefc6cSEric Dumazet return false; 261275eefc6cSEric Dumazet 26137aa5470cSEric Dumazet set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 2614f9616c35SEric Dumazet /* It is possible TX completion already happened 2615f9616c35SEric Dumazet * before we set TSQ_THROTTLED, so we must 2616f9616c35SEric Dumazet * test again the condition. 
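 * (if the TX completion freed enough bytes before TSQ_THROTTLED became
 * visible, nobody will un-throttle us later, so we must re-check and
 * keep sending in that case)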
2617f9616c35SEric Dumazet */
2618f9616c35SEric Dumazet smp_mb__after_atomic();
2619ce8299b6SEric Dumazet if (refcount_read(&sk->sk_wmem_alloc) > limit)
2620f9616c35SEric Dumazet return true;
2621f9616c35SEric Dumazet }
2622f9616c35SEric Dumazet return false;
2623f9616c35SEric Dumazet }
2624f9616c35SEric Dumazet
262505b055e8SFrancis Yan static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
262605b055e8SFrancis Yan {
2627628174ccSEric Dumazet const u32 now = tcp_jiffies32;
2628efe967cdSArnd Bergmann enum tcp_chrono old = tp->chrono_type;
262905b055e8SFrancis Yan
2630efe967cdSArnd Bergmann if (old > TCP_CHRONO_UNSPEC)
2631efe967cdSArnd Bergmann tp->chrono_stat[old - 1] += now - tp->chrono_start;
263205b055e8SFrancis Yan tp->chrono_start = now;
263305b055e8SFrancis Yan tp->chrono_type = new;
263405b055e8SFrancis Yan }
263505b055e8SFrancis Yan
263605b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
263705b055e8SFrancis Yan {
263805b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk);
263905b055e8SFrancis Yan
264005b055e8SFrancis Yan /* If there are multiple conditions worthy of tracking in a
26410f87230dSFrancis Yan * chronograph then the highest priority enum takes precedence
26420f87230dSFrancis Yan * over the other conditions, so that if something "more interesting"
264305b055e8SFrancis Yan * starts happening, we stop the previous chrono and start a new one.
264405b055e8SFrancis Yan */
264505b055e8SFrancis Yan if (type > tp->chrono_type)
264605b055e8SFrancis Yan tcp_chrono_set(tp, type);
264705b055e8SFrancis Yan }
264805b055e8SFrancis Yan
264905b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
265005b055e8SFrancis Yan {
265105b055e8SFrancis Yan struct tcp_sock *tp = tcp_sk(sk);
265205b055e8SFrancis Yan
26530f87230dSFrancis Yan
26540f87230dSFrancis Yan /* There are multiple conditions worthy of tracking in a
26550f87230dSFrancis Yan * chronograph, so that the highest priority enum takes
26560f87230dSFrancis Yan * precedence over the other conditions (see tcp_chrono_start).
26570f87230dSFrancis Yan * If a condition stops, we only stop chrono tracking if
26580f87230dSFrancis Yan * it is the "most interesting" (i.e. current) chrono we are
26590f87230dSFrancis Yan * tracking, and we start the busy chrono if we have pending data.
26600f87230dSFrancis Yan */
266175c119afSEric Dumazet if (tcp_rtx_and_write_queues_empty(sk))
266205b055e8SFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
26630f87230dSFrancis Yan else if (type == tp->chrono_type)
26640f87230dSFrancis Yan tcp_chrono_set(tp, TCP_CHRONO_BUSY);
266505b055e8SFrancis Yan }
266605b055e8SFrancis Yan
26671da177e4SLinus Torvalds /* This routine writes packets to the network. It advances the
26681da177e4SLinus Torvalds * send_head. This happens as incoming acks open up the remote
26691da177e4SLinus Torvalds * window for us.
26701da177e4SLinus Torvalds *
2671f8269a49SIlpo Järvinen * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2672f8269a49SIlpo Järvinen * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2673f8269a49SIlpo Järvinen * account rare use of URG, this is not a big flaw.
2674f8269a49SIlpo Järvinen *
26756ba8a3b1SNandita Dukkipati * Send at most one packet when push_one > 0. Temporarily ignore
26766ba8a3b1SNandita Dukkipati * cwnd limit to force at most one packet out when push_one == 2.
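 * (push_one == 2 is used by the loss probe timer, so that a TLP probe
 * can still go out when the congestion window is exhausted)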
26776ba8a3b1SNandita Dukkipati 2678a2a385d6SEric Dumazet * Returns true, if no segments are in flight and we have queued segments, 2679a2a385d6SEric Dumazet * but cannot send anything now because of SWS or another problem. 26801da177e4SLinus Torvalds */ 2681a2a385d6SEric Dumazet static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 2682d5dd9175SIlpo Järvinen int push_one, gfp_t gfp) 26831da177e4SLinus Torvalds { 26841da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 268592df7b51SDavid S. Miller struct sk_buff *skb; 2686c1b4a7e6SDavid S. Miller unsigned int tso_segs, sent_pkts; 2687c1b4a7e6SDavid S. Miller int cwnd_quota; 26885d424d5aSJohn Heffner int result; 26895615f886SFrancis Yan bool is_cwnd_limited = false, is_rwnd_limited = false; 2690605ad7f1SEric Dumazet u32 max_segs; 26911da177e4SLinus Torvalds 2692c1b4a7e6SDavid S. Miller sent_pkts = 0; 26935d424d5aSJohn Heffner 2694ee1836aeSEric Dumazet tcp_mstamp_refresh(tp); 2695d5dd9175SIlpo Järvinen if (!push_one) { 26965d424d5aSJohn Heffner /* Do MTU probing. */ 2697d5dd9175SIlpo Järvinen result = tcp_mtu_probe(sk); 2698d5dd9175SIlpo Järvinen if (!result) { 2699a2a385d6SEric Dumazet return false; 27005d424d5aSJohn Heffner } else if (result > 0) { 27015d424d5aSJohn Heffner sent_pkts = 1; 27025d424d5aSJohn Heffner } 2703d5dd9175SIlpo Järvinen } 27045d424d5aSJohn Heffner 2705ed6e7268SNeal Cardwell max_segs = tcp_tso_segs(sk, mss_now); 2706fe067e8aSDavid S. Miller while ((skb = tcp_send_head(sk))) { 2707c8ac3774SHerbert Xu unsigned int limit; 2708c8ac3774SHerbert Xu 270979861919SEric Dumazet if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { 271079861919SEric Dumazet /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2711a1ac9c8aSMartin KaFai Lau tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2712a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); 271379861919SEric Dumazet list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 2714bf50b606SEric Dumazet tcp_init_tso_segs(skb, mss_now); 271579861919SEric Dumazet goto repair; /* Skip network transmission */ 271679861919SEric Dumazet } 271779861919SEric Dumazet 2718218af599SEric Dumazet if (tcp_pacing_check(sk)) 2719218af599SEric Dumazet break; 2720218af599SEric Dumazet 27215bbb432cSEric Dumazet tso_segs = tcp_init_tso_segs(skb, mss_now); 2722c1b4a7e6SDavid S. Miller BUG_ON(!tso_segs); 2723c1b4a7e6SDavid S. Miller 2724b68e9f85SHerbert Xu cwnd_quota = tcp_cwnd_test(tp, skb); 27256ba8a3b1SNandita Dukkipati if (!cwnd_quota) { 27266ba8a3b1SNandita Dukkipati if (push_one == 2) 27276ba8a3b1SNandita Dukkipati /* Force out a loss probe pkt. */ 27286ba8a3b1SNandita Dukkipati cwnd_quota = 1; 27296ba8a3b1SNandita Dukkipati else 2730b68e9f85SHerbert Xu break; 27316ba8a3b1SNandita Dukkipati } 2732b68e9f85SHerbert Xu 27335615f886SFrancis Yan if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { 27345615f886SFrancis Yan is_rwnd_limited = true; 2735b68e9f85SHerbert Xu break; 27365615f886SFrancis Yan } 2737b68e9f85SHerbert Xu 2738d6a4e26aSEric Dumazet if (tso_segs == 1) { 2739aa93466bSDavid S. Miller if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2740aa93466bSDavid S. Miller (tcp_skb_is_last(sk, skb) ? 2741aa93466bSDavid S. Miller nonagle : TCP_NAGLE_PUSH)))) 2742aa93466bSDavid S. Miller break; 2743c1b4a7e6SDavid S. 
Miller } else { 2744ca8a2263SNeal Cardwell if (!push_one && 2745605ad7f1SEric Dumazet tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 2746f9bfe4e6SEric Dumazet &is_rwnd_limited, max_segs)) 2747aa93466bSDavid S. Miller break; 2748c1b4a7e6SDavid S. Miller } 2749aa93466bSDavid S. Miller 2750605ad7f1SEric Dumazet limit = mss_now; 2751d6a4e26aSEric Dumazet if (tso_segs > 1 && !tcp_urg_mode(tp)) 2752605ad7f1SEric Dumazet limit = tcp_mss_split_point(sk, skb, mss_now, 2753605ad7f1SEric Dumazet min_t(unsigned int, 2754605ad7f1SEric Dumazet cwnd_quota, 2755605ad7f1SEric Dumazet max_segs), 2756605ad7f1SEric Dumazet nonagle); 2757605ad7f1SEric Dumazet 2758605ad7f1SEric Dumazet if (skb->len > limit && 275956483341SEric Dumazet unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2760605ad7f1SEric Dumazet break; 2761605ad7f1SEric Dumazet 2762f9616c35SEric Dumazet if (tcp_small_queue_check(sk, skb, 0)) 276346d3ceabSEric Dumazet break; 2764c9eeec26SEric Dumazet 27651f85e626SEric Dumazet /* Argh, we hit an empty skb(), presumably a thread 27661f85e626SEric Dumazet * is sleeping in sendmsg()/sk_stream_wait_memory(). 27671f85e626SEric Dumazet * We do not want to send a pure-ack packet and have 27681f85e626SEric Dumazet * a strange looking rtx queue with empty packet(s). 27691f85e626SEric Dumazet */ 27701f85e626SEric Dumazet if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) 27711f85e626SEric Dumazet break; 27721f85e626SEric Dumazet 2773d5dd9175SIlpo Järvinen if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 27741da177e4SLinus Torvalds break; 27751da177e4SLinus Torvalds 2776ec342325SAndrew Vagin repair: 27771da177e4SLinus Torvalds /* Advance the send_head. This one is sent out. 27781da177e4SLinus Torvalds * This call will increment packets_out. 27791da177e4SLinus Torvalds */ 278066f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 27811da177e4SLinus Torvalds 27821da177e4SLinus Torvalds tcp_minshall_update(tp, mss_now, skb); 2783a262f0cdSNandita Dukkipati sent_pkts += tcp_skb_pcount(skb); 2784d5dd9175SIlpo Järvinen 2785d5dd9175SIlpo Järvinen if (push_one) 2786d5dd9175SIlpo Järvinen break; 27871da177e4SLinus Torvalds } 27881da177e4SLinus Torvalds 27895615f886SFrancis Yan if (is_rwnd_limited) 27905615f886SFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); 27915615f886SFrancis Yan else 27925615f886SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 27935615f886SFrancis Yan 279440570375SEric Dumazet is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); 2795299bcb55SNeal Cardwell if (likely(sent_pkts || is_cwnd_limited)) 2796299bcb55SNeal Cardwell tcp_cwnd_validate(sk, is_cwnd_limited); 2797299bcb55SNeal Cardwell 2798aa93466bSDavid S. Miller if (likely(sent_pkts)) { 2799684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 2800684bad11SYuchung Cheng tp->prr_out += sent_pkts; 28016ba8a3b1SNandita Dukkipati 28026ba8a3b1SNandita Dukkipati /* Send one loss probe per tail loss episode. 
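 * (when push_one == 2 this call came from the probe timer itself, so
 * do not schedule another probe)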
*/ 28036ba8a3b1SNandita Dukkipati if (push_one != 2) 2804ed66dfafSNeal Cardwell tcp_schedule_loss_probe(sk, false); 2805a2a385d6SEric Dumazet return false; 28061da177e4SLinus Torvalds } 280775c119afSEric Dumazet return !tp->packets_out && !tcp_write_queue_empty(sk); 28086ba8a3b1SNandita Dukkipati } 28096ba8a3b1SNandita Dukkipati 2810ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) 28116ba8a3b1SNandita Dukkipati { 28126ba8a3b1SNandita Dukkipati struct inet_connection_sock *icsk = inet_csk(sk); 28136ba8a3b1SNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk); 28141c2709cfSNeal Cardwell u32 timeout, timeout_us, rto_delta_us; 28152ae21cf5SEric Dumazet int early_retrans; 28166ba8a3b1SNandita Dukkipati 28176ba8a3b1SNandita Dukkipati /* Don't do any loss probe on a Fast Open connection before 3WHS 28186ba8a3b1SNandita Dukkipati * finishes. 28196ba8a3b1SNandita Dukkipati */ 2820d983ea6fSEric Dumazet if (rcu_access_pointer(tp->fastopen_rsk)) 28216ba8a3b1SNandita Dukkipati return false; 28226ba8a3b1SNandita Dukkipati 282352e65865SKuniyuki Iwashima early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); 28246ba8a3b1SNandita Dukkipati /* Schedule a loss probe in 2*RTT for SACK capable connections 2825b4f70c3dSNeal Cardwell * not in loss recovery, that are either limited by cwnd or application. 28266ba8a3b1SNandita Dukkipati */ 28272ae21cf5SEric Dumazet if ((early_retrans != 3 && early_retrans != 4) || 2828bec41a11SYuchung Cheng !tp->packets_out || !tcp_is_sack(tp) || 2829b4f70c3dSNeal Cardwell (icsk->icsk_ca_state != TCP_CA_Open && 2830b4f70c3dSNeal Cardwell icsk->icsk_ca_state != TCP_CA_CWR)) 28316ba8a3b1SNandita Dukkipati return false; 28326ba8a3b1SNandita Dukkipati 2833bb4d991aSYuchung Cheng /* Probe timeout is 2*rtt. Add minimum RTO to account 2834f9b99582SYuchung Cheng * for delayed ack when there's one outstanding packet. If no RTT 2835f9b99582SYuchung Cheng * sample is available then probe after TCP_TIMEOUT_INIT. 28366ba8a3b1SNandita Dukkipati */ 2837bb4d991aSYuchung Cheng if (tp->srtt_us) { 28381c2709cfSNeal Cardwell timeout_us = tp->srtt_us >> 2; 28396ba8a3b1SNandita Dukkipati if (tp->packets_out == 1) 28401c2709cfSNeal Cardwell timeout_us += tcp_rto_min_us(sk); 2841bb4d991aSYuchung Cheng else 28421c2709cfSNeal Cardwell timeout_us += TCP_TIMEOUT_MIN_US; 28431c2709cfSNeal Cardwell timeout = usecs_to_jiffies(timeout_us); 2844bb4d991aSYuchung Cheng } else { 2845bb4d991aSYuchung Cheng timeout = TCP_TIMEOUT_INIT; 2846bb4d991aSYuchung Cheng } 28476ba8a3b1SNandita Dukkipati 2848a2815817SNeal Cardwell /* If the RTO formula yields an earlier time, then use that time. */ 2849ed66dfafSNeal Cardwell rto_delta_us = advancing_rto ? 2850ed66dfafSNeal Cardwell jiffies_to_usecs(inet_csk(sk)->icsk_rto) : 2851ed66dfafSNeal Cardwell tcp_rto_delta_us(sk); /* How far in future is RTO? */ 2852a2815817SNeal Cardwell if (rto_delta_us > 0) 2853a2815817SNeal Cardwell timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); 28546ba8a3b1SNandita Dukkipati 28558dc242adSEric Dumazet tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX); 28566ba8a3b1SNandita Dukkipati return true; 28576ba8a3b1SNandita Dukkipati } 28586ba8a3b1SNandita Dukkipati 28591f3279aeSEric Dumazet /* Thanks to skb fast clones, we can detect if a prior transmit of 28601f3279aeSEric Dumazet * a packet is still in a qdisc or driver queue. 28611f3279aeSEric Dumazet * In this case, there is very little point doing a retransmit ! 
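 * (a busy fast clone means the previous transmit still sits in a qdisc
 * or driver queue; a retransmit now would only queue a duplicate behind
 * the original)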
28621f3279aeSEric Dumazet */
2863f4dae54eSEric Dumazet static bool skb_still_in_host_queue(struct sock *sk,
28641f3279aeSEric Dumazet const struct sk_buff *skb)
28651f3279aeSEric Dumazet {
286639bb5e62SEric Dumazet if (unlikely(skb_fclone_busy(sk, skb))) {
2867f4dae54eSEric Dumazet set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2868f4dae54eSEric Dumazet smp_mb__after_atomic();
2869f4dae54eSEric Dumazet if (skb_fclone_busy(sk, skb)) {
2870c10d9310SEric Dumazet NET_INC_STATS(sock_net(sk),
28711f3279aeSEric Dumazet LINUX_MIB_TCPSPURIOUSRTXHOSTQUEUES);
28721f3279aeSEric Dumazet return true;
28731f3279aeSEric Dumazet }
2874f4dae54eSEric Dumazet }
28751f3279aeSEric Dumazet return false;
28761f3279aeSEric Dumazet }
28771f3279aeSEric Dumazet
2878b340b264SYuchung Cheng /* When probe timeout (PTO) fires, try to send a new segment if possible, else
28796ba8a3b1SNandita Dukkipati * retransmit the last segment.
28806ba8a3b1SNandita Dukkipati */
28816ba8a3b1SNandita Dukkipati void tcp_send_loss_probe(struct sock *sk)
28826ba8a3b1SNandita Dukkipati {
28839b717a8dSNandita Dukkipati struct tcp_sock *tp = tcp_sk(sk);
28846ba8a3b1SNandita Dukkipati struct sk_buff *skb;
28856ba8a3b1SNandita Dukkipati int pcount;
28866ba8a3b1SNandita Dukkipati int mss = tcp_current_mss(sk);
28876ba8a3b1SNandita Dukkipati
288876be93fcSYuchung Cheng /* At most one outstanding TLP */
288976be93fcSYuchung Cheng if (tp->tlp_high_seq)
289076be93fcSYuchung Cheng goto rearm_timer;
289176be93fcSYuchung Cheng
289276be93fcSYuchung Cheng tp->tlp_retrans = 0;
2893b340b264SYuchung Cheng skb = tcp_send_head(sk);
289475c119afSEric Dumazet if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2895b340b264SYuchung Cheng pcount = tp->packets_out;
2896b340b264SYuchung Cheng tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2897b340b264SYuchung Cheng if (tp->packets_out > pcount)
2898b340b264SYuchung Cheng goto probe_sent;
28996ba8a3b1SNandita Dukkipati goto rearm_timer;
29006ba8a3b1SNandita Dukkipati }
290175c119afSEric Dumazet skb = skb_rb_last(&sk->tcp_rtx_queue);
2902b2b7af86SYuchung Cheng if (unlikely(!skb)) {
2903b2b7af86SYuchung Cheng WARN_ONCE(tp->packets_out,
2904b2b7af86SYuchung Cheng "invalid inflight: %u state %u cwnd %u mss %d\n",
290540570375SEric Dumazet tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
2906b2b7af86SYuchung Cheng inet_csk(sk)->icsk_pending = 0;
2907b2b7af86SYuchung Cheng return;
2908b2b7af86SYuchung Cheng }
29096ba8a3b1SNandita Dukkipati
29101f3279aeSEric Dumazet if (skb_still_in_host_queue(sk, skb))
29111f3279aeSEric Dumazet goto rearm_timer;
29121f3279aeSEric Dumazet
29136ba8a3b1SNandita Dukkipati pcount = tcp_skb_pcount(skb);
29146ba8a3b1SNandita Dukkipati if (WARN_ON(!pcount))
29156ba8a3b1SNandita Dukkipati goto rearm_timer;
29166ba8a3b1SNandita Dukkipati
29176ba8a3b1SNandita Dukkipati if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
291875c119afSEric Dumazet if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
291975c119afSEric Dumazet (pcount - 1) * mss, mss,
29206cc55e09SOctavian Purdila GFP_ATOMIC)))
29216ba8a3b1SNandita Dukkipati goto rearm_timer;
292275c119afSEric Dumazet skb = skb_rb_next(skb);
29236ba8a3b1SNandita Dukkipati }
29246ba8a3b1SNandita Dukkipati
29256ba8a3b1SNandita Dukkipati if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
29266ba8a3b1SNandita Dukkipati goto rearm_timer;
29276ba8a3b1SNandita Dukkipati
292810d3be56SEric Dumazet if (__tcp_retransmit_skb(sk, skb, 1))
2929b340b264SYuchung Cheng goto rearm_timer;
29306ba8a3b1SNandita Dukkipati
293176be93fcSYuchung Cheng tp->tlp_retrans = 1;
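/* The probe was a retransmission of the last segment; fall through
 * to the common accounting at probe_sent.
 */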
293276be93fcSYuchung Cheng 293376be93fcSYuchung Cheng probe_sent: 29349b717a8dSNandita Dukkipati /* Record snd_nxt for loss detection. */ 29359b717a8dSNandita Dukkipati tp->tlp_high_seq = tp->snd_nxt; 29369b717a8dSNandita Dukkipati 2937c10d9310SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); 2938fcd16c0aSYuchung Cheng /* Reset s.t. tcp_rearm_rto will restart timer from now */ 2939fcd16c0aSYuchung Cheng inet_csk(sk)->icsk_pending = 0; 2940b340b264SYuchung Cheng rearm_timer: 2941fcd16c0aSYuchung Cheng tcp_rearm_rto(sk); 29421da177e4SLinus Torvalds } 29431da177e4SLinus Torvalds 2944a762a980SDavid S. Miller /* Push out any pending frames which were held back due to 2945a762a980SDavid S. Miller * TCP_CORK or attempt at coalescing tiny packets. 2946a762a980SDavid S. Miller * The socket must be locked by the caller. 2947a762a980SDavid S. Miller */ 29489e412ba7SIlpo Järvinen void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 29499e412ba7SIlpo Järvinen int nonagle) 2950a762a980SDavid S. Miller { 2951726e07a8SIlpo Järvinen /* If we are closed, the bytes will have to remain here. 2952726e07a8SIlpo Järvinen * In time closedown will finish, we empty the write queue and 2953726e07a8SIlpo Järvinen * all will be happy. 2954726e07a8SIlpo Järvinen */ 2955726e07a8SIlpo Järvinen if (unlikely(sk->sk_state == TCP_CLOSE)) 2956726e07a8SIlpo Järvinen return; 2957726e07a8SIlpo Järvinen 295899a1dec7SMel Gorman if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 29597450aaf6SEric Dumazet sk_gfp_mask(sk, GFP_ATOMIC))) 29609e412ba7SIlpo Järvinen tcp_check_probe_timer(sk); 2961a762a980SDavid S. Miller } 2962a762a980SDavid S. Miller 2963c1b4a7e6SDavid S. Miller /* Send _single_ skb sitting at the send head. This function requires 2964c1b4a7e6SDavid S. Miller * true push pending frames to setup probe timer etc. 2965c1b4a7e6SDavid S. Miller */ 2966c1b4a7e6SDavid S. Miller void tcp_push_one(struct sock *sk, unsigned int mss_now) 2967c1b4a7e6SDavid S. Miller { 2968fe067e8aSDavid S. Miller struct sk_buff *skb = tcp_send_head(sk); 2969c1b4a7e6SDavid S. Miller 2970c1b4a7e6SDavid S. Miller BUG_ON(!skb || skb->len < mss_now); 2971c1b4a7e6SDavid S. Miller 2972d5dd9175SIlpo Järvinen tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2973c1b4a7e6SDavid S. Miller } 2974c1b4a7e6SDavid S. Miller 29751da177e4SLinus Torvalds /* This function returns the amount that we can raise the 29761da177e4SLinus Torvalds * usable window based on the following constraints 29771da177e4SLinus Torvalds * 29781da177e4SLinus Torvalds * 1. The window can never be shrunk once it is offered (RFC 793) 29791da177e4SLinus Torvalds * 2. We limit memory per socket 29801da177e4SLinus Torvalds * 29811da177e4SLinus Torvalds * RFC 1122: 29821da177e4SLinus Torvalds * "the suggested [SWS] avoidance algorithm for the receiver is to keep 29831da177e4SLinus Torvalds * RECV.NEXT + RCV.WIN fixed until: 29841da177e4SLinus Torvalds * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 29851da177e4SLinus Torvalds * 29861da177e4SLinus Torvalds * i.e. don't raise the right edge of the window until you can raise 29871da177e4SLinus Torvalds * it at least MSS bytes. 29881da177e4SLinus Torvalds * 29891da177e4SLinus Torvalds * Unfortunately, the recommended algorithm breaks header prediction, 29901da177e4SLinus Torvalds * since header prediction assumes th->window stays fixed. 
29911da177e4SLinus Torvalds * 29921da177e4SLinus Torvalds * Strictly speaking, keeping th->window fixed violates the receiver 29931da177e4SLinus Torvalds * side SWS prevention criteria. The problem is that under this rule 29941da177e4SLinus Torvalds * a stream of single byte packets will cause the right side of the 29951da177e4SLinus Torvalds * window to always advance by a single byte. 29961da177e4SLinus Torvalds * 29971da177e4SLinus Torvalds * Of course, if the sender implements sender side SWS prevention 29981da177e4SLinus Torvalds * then this will not be a problem. 29991da177e4SLinus Torvalds * 30001da177e4SLinus Torvalds * BSD seems to make the following compromise: 30011da177e4SLinus Torvalds * 30021da177e4SLinus Torvalds * If the free space is less than the 1/4 of the maximum 30031da177e4SLinus Torvalds * space available and the free space is less than 1/2 mss, 30041da177e4SLinus Torvalds * then set the window to 0. 30051da177e4SLinus Torvalds * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 30061da177e4SLinus Torvalds * Otherwise, just prevent the window from shrinking 30071da177e4SLinus Torvalds * and from being larger than the largest representable value. 30081da177e4SLinus Torvalds * 30091da177e4SLinus Torvalds * This prevents incremental opening of the window in the regime 30101da177e4SLinus Torvalds * where TCP is limited by the speed of the reader side taking 30111da177e4SLinus Torvalds * data out of the TCP receive queue. It does nothing about 30121da177e4SLinus Torvalds * those cases where the window is constrained on the sender side 30131da177e4SLinus Torvalds * because the pipeline is full. 30141da177e4SLinus Torvalds * 30151da177e4SLinus Torvalds * BSD also seems to "accidentally" limit itself to windows that are a 30161da177e4SLinus Torvalds * multiple of MSS, at least until the free space gets quite small. 30171da177e4SLinus Torvalds * This would appear to be a side effect of the mbuf implementation. 30181da177e4SLinus Torvalds * Combining these two algorithms results in the observed behavior 30191da177e4SLinus Torvalds * of having a fixed window size at almost all times. 30201da177e4SLinus Torvalds * 30211da177e4SLinus Torvalds * Below we obtain similar behavior by forcing the offered window to 30221da177e4SLinus Torvalds * a multiple of the mss when it is feasible to do so. 30231da177e4SLinus Torvalds * 30241da177e4SLinus Torvalds * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 30251da177e4SLinus Torvalds * Regular options like TIMESTAMP are taken into account. 30261da177e4SLinus Torvalds */ 30271da177e4SLinus Torvalds u32 __tcp_select_window(struct sock *sk) 30281da177e4SLinus Torvalds { 3029463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 30301da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3031b650d953Smfreemon@cloudflare.com struct net *net = sock_net(sk); 3032caa20d9aSStephen Hemminger /* MSS for the peer's data. Previous versions used mss_clamp 30331da177e4SLinus Torvalds * here. I don't know if the value based on our guesses 30341da177e4SLinus Torvalds * of peer's MSS is better for the performance. It's more correct 30351da177e4SLinus Torvalds * but may be worse for the performance because of rcv_mss 30361da177e4SLinus Torvalds * fluctuations. 
--SAW 1998/11/1
30371da177e4SLinus Torvalds */
3038463c84b9SArnaldo Carvalho de Melo int mss = icsk->icsk_ack.rcv_mss;
30391da177e4SLinus Torvalds int free_space = tcp_space(sk);
304086c1a045SFlorian Westphal int allowed_space = tcp_full_space(sk);
3041071c8ed6SFlorian Westphal int full_space, window;
3042071c8ed6SFlorian Westphal
3043071c8ed6SFlorian Westphal if (sk_is_mptcp(sk))
3044071c8ed6SFlorian Westphal mptcp_space(sk, &free_space, &allowed_space);
3045071c8ed6SFlorian Westphal
3046071c8ed6SFlorian Westphal full_space = min_t(int, tp->window_clamp, allowed_space);
30471da177e4SLinus Torvalds
304806425c30SEric Dumazet if (unlikely(mss > full_space)) {
30491da177e4SLinus Torvalds mss = full_space;
305006425c30SEric Dumazet if (mss <= 0)
305106425c30SEric Dumazet return 0;
305206425c30SEric Dumazet }
3053b650d953Smfreemon@cloudflare.com
3054b650d953Smfreemon@cloudflare.com /* Only allow window shrink if the sysctl is enabled and we have
3055b650d953Smfreemon@cloudflare.com * a non-zero scaling factor in effect.
3056b650d953Smfreemon@cloudflare.com */
3057b650d953Smfreemon@cloudflare.com if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3058b650d953Smfreemon@cloudflare.com goto shrink_window_allowed;
3059b650d953Smfreemon@cloudflare.com
3060b650d953Smfreemon@cloudflare.com /* do not allow window to shrink */
3061b650d953Smfreemon@cloudflare.com
3062b92edbe0SEric Dumazet if (free_space < (full_space >> 1)) {
3063463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.quick = 0;
30641da177e4SLinus Torvalds
3065b8da51ebSEric Dumazet if (tcp_under_memory_pressure(sk))
3066053f3684SWei Wang tcp_adjust_rcv_ssthresh(sk);
30671da177e4SLinus Torvalds
306886c1a045SFlorian Westphal /* free_space might become our new window, make sure we don't
306986c1a045SFlorian Westphal * increase it due to wscale.
307086c1a045SFlorian Westphal */
307186c1a045SFlorian Westphal free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
307286c1a045SFlorian Westphal
307386c1a045SFlorian Westphal /* if free space is less than mss estimate, or is below 1/16th
307486c1a045SFlorian Westphal * of the maximum allowed, try to move to zero-window, else
307586c1a045SFlorian Westphal * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
307686c1a045SFlorian Westphal * new incoming data is dropped due to memory limits.
307786c1a045SFlorian Westphal * With large window, mss test triggers way too late in order
307886c1a045SFlorian Westphal * to announce zero window in time before rmem limit kicks in.
307986c1a045SFlorian Westphal */
308086c1a045SFlorian Westphal if (free_space < (allowed_space >> 4) || free_space < mss)
30811da177e4SLinus Torvalds return 0;
30821da177e4SLinus Torvalds }
30831da177e4SLinus Torvalds
30841da177e4SLinus Torvalds if (free_space > tp->rcv_ssthresh)
30851da177e4SLinus Torvalds free_space = tp->rcv_ssthresh;
30861da177e4SLinus Torvalds
30871da177e4SLinus Torvalds /* Don't do rounding if we are using window scaling, since the
30881da177e4SLinus Torvalds * scaled window will not line up with the MSS boundary anyway.
30891da177e4SLinus Torvalds */
30901da177e4SLinus Torvalds if (tp->rx_opt.rcv_wscale) {
30911da177e4SLinus Torvalds window = free_space;
30921da177e4SLinus Torvalds
30931da177e4SLinus Torvalds /* Advertise enough space so that it won't get scaled away.
30941da177e4SLinus Torvalds * Important case: prevent zero window announcement if
30951da177e4SLinus Torvalds * 1<<rcv_wscale > mss.
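 * Worked example (numbers assumed): with rcv_wscale = 7 the window is
 * advertised in units of 128 bytes, so window = 100 would be announced
 * as zero; ALIGN(window, 128) rounds it up to 128 instead.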
30961da177e4SLinus Torvalds */ 30971935299dSGao Feng window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); 30981da177e4SLinus Torvalds } else { 30991935299dSGao Feng window = tp->rcv_wnd; 31001da177e4SLinus Torvalds /* Get the largest window that is a nice multiple of mss. 31011da177e4SLinus Torvalds * Window clamp already applied above. 31021da177e4SLinus Torvalds * If our current window offering is within 1 mss of the 31031da177e4SLinus Torvalds * free space we just keep it. This prevents the divide 31041da177e4SLinus Torvalds * and multiply from happening most of the time. 31051da177e4SLinus Torvalds * We also don't do any window rounding when the free space 31061da177e4SLinus Torvalds * is too small. 31071da177e4SLinus Torvalds */ 31081da177e4SLinus Torvalds if (window <= free_space - mss || window > free_space) 31091935299dSGao Feng window = rounddown(free_space, mss); 311084565070SJohn Heffner else if (mss == full_space && 3111b92edbe0SEric Dumazet free_space > window + (full_space >> 1)) 311284565070SJohn Heffner window = free_space; 31131da177e4SLinus Torvalds } 31141da177e4SLinus Torvalds 31151da177e4SLinus Torvalds return window; 3116b650d953Smfreemon@cloudflare.com 3117b650d953Smfreemon@cloudflare.com shrink_window_allowed: 3118b650d953Smfreemon@cloudflare.com /* new window should always be an exact multiple of scaling factor */ 3119b650d953Smfreemon@cloudflare.com free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); 3120b650d953Smfreemon@cloudflare.com 3121b650d953Smfreemon@cloudflare.com if (free_space < (full_space >> 1)) { 3122b650d953Smfreemon@cloudflare.com icsk->icsk_ack.quick = 0; 3123b650d953Smfreemon@cloudflare.com 3124b650d953Smfreemon@cloudflare.com if (tcp_under_memory_pressure(sk)) 3125b650d953Smfreemon@cloudflare.com tcp_adjust_rcv_ssthresh(sk); 3126b650d953Smfreemon@cloudflare.com 3127b650d953Smfreemon@cloudflare.com /* if free space is too low, return a zero window */ 3128b650d953Smfreemon@cloudflare.com if (free_space < (allowed_space >> 4) || free_space < mss || 3129b650d953Smfreemon@cloudflare.com free_space < (1 << tp->rx_opt.rcv_wscale)) 3130b650d953Smfreemon@cloudflare.com return 0; 3131b650d953Smfreemon@cloudflare.com } 3132b650d953Smfreemon@cloudflare.com 3133b650d953Smfreemon@cloudflare.com if (free_space > tp->rcv_ssthresh) { 3134b650d953Smfreemon@cloudflare.com free_space = tp->rcv_ssthresh; 3135b650d953Smfreemon@cloudflare.com /* new window should always be an exact multiple of scaling factor 3136b650d953Smfreemon@cloudflare.com * 3137b650d953Smfreemon@cloudflare.com * For this case, we ALIGN "up" (increase free_space) because 3138b650d953Smfreemon@cloudflare.com * we know free_space is not zero here, it has been reduced from 3139b650d953Smfreemon@cloudflare.com * the memory-based limit, and rcv_ssthresh is not a hard limit 3140b650d953Smfreemon@cloudflare.com * (unlike sk_rcvbuf). 
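 * e.g. (numbers assumed) free_space = 5000 with rcv_wscale = 7 becomes
 * ALIGN(5000, 128) = 5120, which the peer sees as exactly 40 units.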
3141b650d953Smfreemon@cloudflare.com */ 3142b650d953Smfreemon@cloudflare.com free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); 3143b650d953Smfreemon@cloudflare.com } 3144b650d953Smfreemon@cloudflare.com 3145b650d953Smfreemon@cloudflare.com return free_space; 31461da177e4SLinus Torvalds } 31471da177e4SLinus Torvalds 3148cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb, 3149082ac2d5SMartin KaFai Lau const struct sk_buff *next_skb) 3150082ac2d5SMartin KaFai Lau { 31510a2cf20cSSoheil Hassas Yeganeh if (unlikely(tcp_has_tx_tstamp(next_skb))) { 31520a2cf20cSSoheil Hassas Yeganeh const struct skb_shared_info *next_shinfo = 31530a2cf20cSSoheil Hassas Yeganeh skb_shinfo(next_skb); 3154082ac2d5SMartin KaFai Lau struct skb_shared_info *shinfo = skb_shinfo(skb); 3155082ac2d5SMartin KaFai Lau 31560a2cf20cSSoheil Hassas Yeganeh shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 3157082ac2d5SMartin KaFai Lau shinfo->tskey = next_shinfo->tskey; 31582de8023eSMartin KaFai Lau TCP_SKB_CB(skb)->txstamp_ack |= 31592de8023eSMartin KaFai Lau TCP_SKB_CB(next_skb)->txstamp_ack; 3160082ac2d5SMartin KaFai Lau } 3161082ac2d5SMartin KaFai Lau } 3162082ac2d5SMartin KaFai Lau 31634a17fc3aSIlpo Järvinen /* Collapses two adjacent SKB's during retransmission. */ 3164f8071cdeSEric Dumazet static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 31651da177e4SLinus Torvalds { 31661da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 316775c119afSEric Dumazet struct sk_buff *next_skb = skb_rb_next(skb); 316813dde04fSWei Yongjun int next_skb_size; 31691da177e4SLinus Torvalds 3170058dc334SIlpo Järvinen next_skb_size = next_skb->len; 31711da177e4SLinus Torvalds 3172058dc334SIlpo Järvinen BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 31731da177e4SLinus Torvalds 3174bd446314SEric Dumazet if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size)) 3175f8071cdeSEric Dumazet return false; 3176bd446314SEric Dumazet 31772b7cda9cSEric Dumazet tcp_highest_sack_replace(sk, next_skb, skb); 3178a6963a6bSIlpo Järvinen 31791da177e4SLinus Torvalds /* Update sequence range on original skb. */ 31801da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 31811da177e4SLinus Torvalds 3182e6c7d085SIlpo Järvinen /* Merge over control information. This moves PSH/FIN etc. over */ 31834de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 31841da177e4SLinus Torvalds 31851da177e4SLinus Torvalds /* All done, get rid of second SKB and account for it so 31861da177e4SLinus Torvalds * packet counting does not break. 
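 * (tcp_adjust_pcount() below transfers next_skb's segment count so
 * that packets_out and the SACK bookkeeping stay consistent)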
31871da177e4SLinus Torvalds */
31884828e7f4SIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3189a643b5d4SMartin KaFai Lau TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3190b7689205SIlpo Järvinen
3191b7689205SIlpo Järvinen /* changed transmit queue under us so clear hints */
3192ef9da47cSIlpo Järvinen tcp_clear_retrans_hints_partial(tp);
3193ef9da47cSIlpo Järvinen if (next_skb == tp->retransmit_skb_hint)
3194ef9da47cSIlpo Järvinen tp->retransmit_skb_hint = skb;
3195b7689205SIlpo Järvinen
3196797108d1SIlpo Järvinen tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3197797108d1SIlpo Järvinen
3198082ac2d5SMartin KaFai Lau tcp_skb_collapse_tstamp(skb, next_skb);
3199082ac2d5SMartin KaFai Lau
320075c119afSEric Dumazet tcp_rtx_queue_unlink_and_free(next_skb, sk);
3201f8071cdeSEric Dumazet return true;
32021da177e4SLinus Torvalds }
32031da177e4SLinus Torvalds
320467edfef7SAndi Kleen /* Check if coalescing SKBs is legal. */
3205a2a385d6SEric Dumazet static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
32064a17fc3aSIlpo Järvinen {
32074a17fc3aSIlpo Järvinen if (tcp_skb_pcount(skb) > 1)
3208a2a385d6SEric Dumazet return false;
32094a17fc3aSIlpo Järvinen if (skb_cloned(skb))
3210a2a385d6SEric Dumazet return false;
32112331ccc5SEric Dumazet /* Some heuristics for collapsing over SACK'd could be invented */
32124a17fc3aSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3213a2a385d6SEric Dumazet return false;
32144a17fc3aSIlpo Järvinen
3215a2a385d6SEric Dumazet return true;
32164a17fc3aSIlpo Järvinen }
32174a17fc3aSIlpo Järvinen
321867edfef7SAndi Kleen /* Collapse packets in the retransmit queue to create
321967edfef7SAndi Kleen * fewer packets on the wire. This is only done on retransmission.
322067edfef7SAndi Kleen */
32214a17fc3aSIlpo Järvinen static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
32224a17fc3aSIlpo Järvinen int space)
32234a17fc3aSIlpo Järvinen {
32244a17fc3aSIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk);
32254a17fc3aSIlpo Järvinen struct sk_buff *skb = to, *tmp;
3226a2a385d6SEric Dumazet bool first = true;
32274a17fc3aSIlpo Järvinen
32281a63cb91SKuniyuki Iwashima if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
32294a17fc3aSIlpo Järvinen return;
32304de075e0SEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
32314a17fc3aSIlpo Järvinen return;
32324a17fc3aSIlpo Järvinen
323375c119afSEric Dumazet skb_rbtree_walk_from_safe(skb, tmp) {
32344a17fc3aSIlpo Järvinen if (!tcp_can_collapse(sk, skb))
32354a17fc3aSIlpo Järvinen break;
32364a17fc3aSIlpo Järvinen
323785712484SMat Martineau if (!tcp_skb_can_collapse(to, skb))
3238a643b5d4SMartin KaFai Lau break;
3239a643b5d4SMartin KaFai Lau
32404a17fc3aSIlpo Järvinen space -= skb->len;
32414a17fc3aSIlpo Järvinen
32424a17fc3aSIlpo Järvinen if (first) {
3243a2a385d6SEric Dumazet first = false;
32444a17fc3aSIlpo Järvinen continue;
32454a17fc3aSIlpo Järvinen }
32464a17fc3aSIlpo Järvinen
32474a17fc3aSIlpo Järvinen if (space < 0)
32484a17fc3aSIlpo Järvinen break;
32494a17fc3aSIlpo Järvinen
32504a17fc3aSIlpo Järvinen if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
32514a17fc3aSIlpo Järvinen break;
32524a17fc3aSIlpo Järvinen
3253f8071cdeSEric Dumazet if (!tcp_collapse_retrans(sk, to))
3254f8071cdeSEric Dumazet break;
32554a17fc3aSIlpo Järvinen }
32564a17fc3aSIlpo Järvinen }
32574a17fc3aSIlpo Järvinen
32581da177e4SLinus Torvalds /* This retransmits one SKB.
Policy decisions and retransmit queue 32591da177e4SLinus Torvalds * state updates are done by the caller. Returns non-zero if an 32601da177e4SLinus Torvalds * error occurred which prevented the send. 32611da177e4SLinus Torvalds */ 326210d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 32631da177e4SLinus Torvalds { 32645d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 326510d3be56SEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 32667d227cd2SSridhar Samudrala unsigned int cur_mss; 326710d3be56SEric Dumazet int diff, len, err; 3268536a6c8eSYonglong Li int avail_wnd; 326910d3be56SEric Dumazet 327010d3be56SEric Dumazet /* Inconclusive MTU probe */ 327110d3be56SEric Dumazet if (icsk->icsk_mtup.probe_size) 32725d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 32735d424d5aSJohn Heffner 32741f3279aeSEric Dumazet if (skb_still_in_host_queue(sk, skb)) 32751f3279aeSEric Dumazet return -EBUSY; 32761f3279aeSEric Dumazet 32771da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 32787f582b24SEric Dumazet if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { 32797f582b24SEric Dumazet WARN_ON_ONCE(1); 32807f582b24SEric Dumazet return -EINVAL; 32817f582b24SEric Dumazet } 32821da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 32831da177e4SLinus Torvalds return -ENOMEM; 32841da177e4SLinus Torvalds } 32851da177e4SLinus Torvalds 32867d227cd2SSridhar Samudrala if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 32877d227cd2SSridhar Samudrala return -EHOSTUNREACH; /* Routing failure or similar. */ 32887d227cd2SSridhar Samudrala 32890c54b85fSIlpo Järvinen cur_mss = tcp_current_mss(sk); 3290536a6c8eSYonglong Li avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 32917d227cd2SSridhar Samudrala 32921da177e4SLinus Torvalds /* If the receiver has shrunk its window, and skb is out of 32931da177e4SLinus Torvalds * the new window, do not retransmit it. The exception is the 32941da177e4SLinus Torvalds * case when the window is shrunk to zero. In this case 3295536a6c8eSYonglong Li * our retransmit of one segment serves as a zero window probe. 32961da177e4SLinus Torvalds */ 3297536a6c8eSYonglong Li if (avail_wnd <= 0) { 3298536a6c8eSYonglong Li if (TCP_SKB_CB(skb)->seq != tp->snd_una) 32991da177e4SLinus Torvalds return -EAGAIN; 3300536a6c8eSYonglong Li avail_wnd = cur_mss; 3301536a6c8eSYonglong Li } 33021da177e4SLinus Torvalds 330310d3be56SEric Dumazet len = cur_mss * segs; 3304536a6c8eSYonglong Li if (len > avail_wnd) { 3305536a6c8eSYonglong Li len = rounddown(avail_wnd, cur_mss); 3306536a6c8eSYonglong Li if (!len) 3307536a6c8eSYonglong Li len = avail_wnd; 3308536a6c8eSYonglong Li } 330910d3be56SEric Dumazet if (skb->len > len) { 331075c119afSEric Dumazet if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, 331175c119afSEric Dumazet cur_mss, GFP_ATOMIC)) 33121da177e4SLinus Torvalds return -ENOMEM; /* We'll try again later.
*/ 331302276f3cSIlpo Järvinen } else { 3314c4777efaSEric Dumazet if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) 3315c52e2421SEric Dumazet return -ENOMEM; 331610d3be56SEric Dumazet 331710d3be56SEric Dumazet diff = tcp_skb_pcount(skb); 331810d3be56SEric Dumazet tcp_set_skb_tso_segs(skb, cur_mss); 331910d3be56SEric Dumazet diff -= tcp_skb_pcount(skb); 332010d3be56SEric Dumazet if (diff) 332110d3be56SEric Dumazet tcp_adjust_pcount(sk, skb, diff); 3322536a6c8eSYonglong Li avail_wnd = min_t(int, avail_wnd, cur_mss); 3323536a6c8eSYonglong Li if (skb->len < avail_wnd) 3324536a6c8eSYonglong Li tcp_retrans_try_collapse(sk, skb, avail_wnd); 33251da177e4SLinus Torvalds } 33261da177e4SLinus Torvalds 332749213555SDaniel Borkmann /* RFC3168, section 6.1.1.1. ECN fallback */ 332849213555SDaniel Borkmann if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) 332949213555SDaniel Borkmann tcp_ecn_clear_syn(sk, skb); 333049213555SDaniel Borkmann 3331678550c6SYuchung Cheng /* Update global and local TCP statistics. */ 3332678550c6SYuchung Cheng segs = tcp_skb_pcount(skb); 3333678550c6SYuchung Cheng TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); 3334678550c6SYuchung Cheng if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 3335678550c6SYuchung Cheng __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 3336678550c6SYuchung Cheng tp->total_retrans += segs; 3337fb31c9b9SWei Wang tp->bytes_retrans += skb->len; 3338678550c6SYuchung Cheng 333950bceae9SThomas Graf /* make sure skb->data is aligned on arches that require it 334050bceae9SThomas Graf * and check if ack-trimming & collapsing extended the headroom 334150bceae9SThomas Graf * beyond what csum_start can cover. 334250bceae9SThomas Graf */ 334350bceae9SThomas Graf if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 334450bceae9SThomas Graf skb_headroom(skb) >= 0xFFFF)) { 334510a81980SEric Dumazet struct sk_buff *nskb; 334610a81980SEric Dumazet 3347e2080072SEric Dumazet tcp_skb_tsorted_save(skb) { 334810a81980SEric Dumazet nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 334907f8e4d0SFlorian Westphal if (nskb) { 335007f8e4d0SFlorian Westphal nskb->dev = NULL; 335107f8e4d0SFlorian Westphal err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC); 335207f8e4d0SFlorian Westphal } else { 335307f8e4d0SFlorian Westphal err = -ENOBUFS; 335407f8e4d0SFlorian Westphal } 3355e2080072SEric Dumazet } tcp_skb_tsorted_restore(skb); 3356e2080072SEric Dumazet 33575889e2c0SYousuk Seung if (!err) { 3358a7a25630SEric Dumazet tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); 33595889e2c0SYousuk Seung tcp_rate_skb_sent(sk, skb); 33605889e2c0SYousuk Seung } 3361117632e6SEric Dumazet } else { 3362c84a5711SYuchung Cheng err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3363117632e6SEric Dumazet } 3364c84a5711SYuchung Cheng 33657f12422cSYuchung Cheng /* To avoid taking spuriously low RTT samples based on a timestamp 33667f12422cSYuchung Cheng * for a transmit that never happened, always mark EVER_RETRANS 33677f12422cSYuchung Cheng */ 33687f12422cSYuchung Cheng TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 33697f12422cSYuchung Cheng 3370a31ad29eSLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) 3371a31ad29eSLawrence Brakmo tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, 3372a31ad29eSLawrence Brakmo TCP_SKB_CB(skb)->seq, segs, err); 3373a31ad29eSLawrence Brakmo 3374fc9f3501SEric Dumazet if (likely(!err)) { 3375e086101bSCong Wang trace_tcp_retransmit_skb(sk, skb); 3376678550c6SYuchung Cheng } else if (err != -EBUSY) { 
3377ec641b39SYuchung Cheng NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); 3378fc9f3501SEric Dumazet } 3379c84a5711SYuchung Cheng return err; 338093b174adSYuchung Cheng } 338193b174adSYuchung Cheng 338210d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 338393b174adSYuchung Cheng { 338493b174adSYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 338510d3be56SEric Dumazet int err = __tcp_retransmit_skb(sk, skb, segs); 33861da177e4SLinus Torvalds 33871da177e4SLinus Torvalds if (err == 0) { 33881da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 33891da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 3390e87cc472SJoe Perches net_dbg_ratelimited("retrans_out leaked\n"); 33911da177e4SLinus Torvalds } 33921da177e4SLinus Torvalds #endif 33931da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 33941da177e4SLinus Torvalds tp->retrans_out += tcp_skb_pcount(skb); 33957ae18975SYuchung Cheng } 33961da177e4SLinus Torvalds 33977ae18975SYuchung Cheng /* Save stamp of the first (attempted) retransmit. */ 33981da177e4SLinus Torvalds if (!tp->retrans_stamp) 3399614e8316SEric Dumazet tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); 34001da177e4SLinus Torvalds 34016e08d5e3SYuchung Cheng if (tp->undo_retrans < 0) 34026e08d5e3SYuchung Cheng tp->undo_retrans = 0; 34036e08d5e3SYuchung Cheng tp->undo_retrans += tcp_skb_pcount(skb); 34041da177e4SLinus Torvalds return err; 34051da177e4SLinus Torvalds } 34061da177e4SLinus Torvalds 34071da177e4SLinus Torvalds /* This gets called after a retransmit timeout, and the initially 34081da177e4SLinus Torvalds * retransmitted data is acknowledged. It tries to continue 34091da177e4SLinus Torvalds * resending the rest of the retransmit queue, until either 34101da177e4SLinus Torvalds * we've sent it all or the congestion window limit is reached. 
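 * A rough sketch of the walk below (descriptive, not a contract):
 * resume from tp->retransmit_skb_hint (or the head of the rtx queue),
 * iterate the rbtree in sequence order, skip skbs that are already
 * SACKed or retransmitted, and stop early when the pacing check, the
 * cwnd budget (segs <= 0) or the small-queue check says so.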
34111da177e4SLinus Torvalds */ 34121da177e4SLinus Torvalds void tcp_xmit_retransmit_queue(struct sock *sk) 34131da177e4SLinus Torvalds { 34146687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 3415b9f1f1ceSEric Dumazet struct sk_buff *skb, *rtx_head, *hole = NULL; 34161da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3417916e6d1aSEric Dumazet bool rearm_timer = false; 3418840a3cbeSYuchung Cheng u32 max_segs; 341961eb55f4SIlpo Järvinen int mib_idx; 34206a438bbeSStephen Hemminger 342145e77d31SIlpo Järvinen if (!tp->packets_out) 342245e77d31SIlpo Järvinen return; 342345e77d31SIlpo Järvinen 342475c119afSEric Dumazet rtx_head = tcp_rtx_queue_head(sk); 3425b9f1f1ceSEric Dumazet skb = tp->retransmit_skb_hint ?: rtx_head; 3426ed6e7268SNeal Cardwell max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); 342775c119afSEric Dumazet skb_rbtree_walk_from(skb) { 3428dca0aaf8SEric Dumazet __u8 sacked; 342910d3be56SEric Dumazet int segs; 34301da177e4SLinus Torvalds 3431218af599SEric Dumazet if (tcp_pacing_check(sk)) 3432218af599SEric Dumazet break; 3433218af599SEric Dumazet 34346a438bbeSStephen Hemminger /* we could do better than to assign each time */ 343551456b29SIan Morris if (!hole) 34366a438bbeSStephen Hemminger tp->retransmit_skb_hint = skb; 34376a438bbeSStephen Hemminger 343840570375SEric Dumazet segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); 343910d3be56SEric Dumazet if (segs <= 0) 3440916e6d1aSEric Dumazet break; 3441dca0aaf8SEric Dumazet sacked = TCP_SKB_CB(skb)->sacked; 3442a3d2e9f8SEric Dumazet /* In case tcp_shift_skb_data() has aggregated large skbs, 3443a3d2e9f8SEric Dumazet * we need to make sure we are not sending too big TSO packets 3444a3d2e9f8SEric Dumazet */ 3445a3d2e9f8SEric Dumazet segs = min_t(int, segs, max_segs); 34460e1c54c2SIlpo Järvinen 3447840a3cbeSYuchung Cheng if (tp->retrans_out >= tp->lost_out) { 3448006f582cSIlpo Järvinen break; 34490e1c54c2SIlpo Järvinen } else if (!(sacked & TCPCB_LOST)) { 345051456b29SIan Morris if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 34510e1c54c2SIlpo Järvinen hole = skb; 345261eb55f4SIlpo Järvinen continue; 34531da177e4SLinus Torvalds 34540e1c54c2SIlpo Järvinen } else { 34550e1c54c2SIlpo Järvinen if (icsk->icsk_ca_state != TCP_CA_Loss) 34560e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPFASTRETRANS; 34570e1c54c2SIlpo Järvinen else 34580e1c54c2SIlpo Järvinen mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 34590e1c54c2SIlpo Järvinen } 34600e1c54c2SIlpo Järvinen 34610e1c54c2SIlpo Järvinen if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 346261eb55f4SIlpo Järvinen continue; 346340b215e5SPavel Emelyanov 3464f9616c35SEric Dumazet if (tcp_small_queue_check(sk, skb, 1)) 3465916e6d1aSEric Dumazet break; 3466f9616c35SEric Dumazet 346710d3be56SEric Dumazet if (tcp_retransmit_skb(sk, skb, segs)) 3468916e6d1aSEric Dumazet break; 346924ab6becSYuchung Cheng 3470de1d6578SYuchung Cheng NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); 34711da177e4SLinus Torvalds 3472684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 3473a262f0cdSNandita Dukkipati tp->prr_out += tcp_skb_pcount(skb); 3474a262f0cdSNandita Dukkipati 347575c119afSEric Dumazet if (skb == rtx_head && 347657dde7f7SYuchung Cheng icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) 3477916e6d1aSEric Dumazet rearm_timer = true; 3478916e6d1aSEric Dumazet 3479916e6d1aSEric Dumazet } 3480916e6d1aSEric Dumazet if (rearm_timer) 34813f80e08fSEric Dumazet tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 34823f421baaSArnaldo Carvalho de Melo
inet_csk(sk)->icsk_rto, 34838dc242adSEric Dumazet TCP_RTO_MAX); 34841da177e4SLinus Torvalds } 34851da177e4SLinus Torvalds 3486d83769a5SEric Dumazet /* We allow exceeding memory limits for FIN packets to expedite 3487d83769a5SEric Dumazet * connection tear down and (memory) recovery. 3488845704a5SEric Dumazet * Otherwise tcp_send_fin() could be tempted to either delay FIN 3489845704a5SEric Dumazet * or even be forced to close the flow without any FIN. 3490a6c5ea4cSEric Dumazet * In general, we want to allow one skb per socket to avoid hangs 3491a6c5ea4cSEric Dumazet * with edge trigger epoll() 3492d83769a5SEric Dumazet */ 3493a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size) 3494d83769a5SEric Dumazet { 3495c4ee1185SEric Dumazet int delta, amt; 3496d83769a5SEric Dumazet 3497c4ee1185SEric Dumazet delta = size - sk->sk_forward_alloc; 3498c4ee1185SEric Dumazet if (delta <= 0) 3499d83769a5SEric Dumazet return; 3500c4ee1185SEric Dumazet amt = sk_mem_pages(delta); 35015e6300e7SEric Dumazet sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 3502e805605cSJohannes Weiner sk_memory_allocated_add(sk, amt); 3503e805605cSJohannes Weiner 3504baac50bbSJohannes Weiner if (mem_cgroup_sockets_enabled && sk->sk_memcg) 35054b1327beSWei Wang mem_cgroup_charge_skmem(sk->sk_memcg, amt, 35064b1327beSWei Wang gfp_memcg_charge() | __GFP_NOFAIL); 3507d83769a5SEric Dumazet } 3508d83769a5SEric Dumazet 3509845704a5SEric Dumazet /* Send a FIN. The caller locks the socket for us. 3510845704a5SEric Dumazet * We should try to send a FIN packet really hard, but eventually give up. 35111da177e4SLinus Torvalds */ 35121da177e4SLinus Torvalds void tcp_send_fin(struct sock *sk) 35131da177e4SLinus Torvalds { 3514ee2aabd3SEric Dumazet struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk); 35151da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 35161da177e4SLinus Torvalds 3517845704a5SEric Dumazet /* Optimization: tack on the FIN if we have one skb in the write queue 3518845704a5SEric Dumazet * and this skb was not yet sent, or we are under memory pressure. 3519845704a5SEric Dumazet * Note: in the latter case, the FIN packet will be sent after a timeout, 3520845704a5SEric Dumazet * as the TCP stack thinks it has already been transmitted. 35211da177e4SLinus Torvalds */ 3522ee2aabd3SEric Dumazet tskb = tail; 352375c119afSEric Dumazet if (!tskb && tcp_under_memory_pressure(sk)) 352475c119afSEric Dumazet tskb = skb_rb_last(&sk->tcp_rtx_queue); 352575c119afSEric Dumazet 352675c119afSEric Dumazet if (tskb) { 3527845704a5SEric Dumazet TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; 3528845704a5SEric Dumazet TCP_SKB_CB(tskb)->end_seq++; 35291da177e4SLinus Torvalds tp->write_seq++; 3530ee2aabd3SEric Dumazet if (!tail) { 3531845704a5SEric Dumazet /* This means tskb was already sent. 3532845704a5SEric Dumazet * Pretend we included the FIN on previous transmit. 3533845704a5SEric Dumazet * We need to set tp->snd_nxt to the value it would have 3534845704a5SEric Dumazet * if FIN had been sent. This is because retransmit path 3535845704a5SEric Dumazet * does not change tp->snd_nxt.
3536845704a5SEric Dumazet */ 3537e0d694d6SEric Dumazet WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); 3538845704a5SEric Dumazet return; 3539845704a5SEric Dumazet } 35401da177e4SLinus Torvalds } else { 3541845704a5SEric Dumazet skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); 3542d1edc085SColin Ian King if (unlikely(!skb)) 3543845704a5SEric Dumazet return; 3544d1edc085SColin Ian King 3545e2080072SEric Dumazet INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 3546d83769a5SEric Dumazet skb_reserve(skb, MAX_TCP_HEADER); 3547a6c5ea4cSEric Dumazet sk_forced_mem_schedule(sk, skb->truesize); 35481da177e4SLinus Torvalds /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 3549e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tp->write_seq, 3550a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_FIN); 35511da177e4SLinus Torvalds tcp_queue_skb(sk, skb); 35521da177e4SLinus Torvalds } 3553845704a5SEric Dumazet __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); 35541da177e4SLinus Torvalds } 35551da177e4SLinus Torvalds 35561da177e4SLinus Torvalds /* We get here when a process closes a file descriptor (either due to 35571da177e4SLinus Torvalds * an explicit close() or as a byproduct of exit()'ing) and there 35581da177e4SLinus Torvalds * was unread data in the receive queue. This behavior is recommended 355965bb723cSGerrit Renker * by RFC 2525, section 2.17. -DaveM 35601da177e4SLinus Torvalds */ 3561dd0fc66fSAl Viro void tcp_send_active_reset(struct sock *sk, gfp_t priority) 35621da177e4SLinus Torvalds { 35631da177e4SLinus Torvalds struct sk_buff *skb; 35641da177e4SLinus Torvalds 35657cc2b043SGao Feng TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 35667cc2b043SGao Feng 35671da177e4SLinus Torvalds /* NOTE: No TCP options attached and we never retransmit this. */ 35681da177e4SLinus Torvalds skb = alloc_skb(MAX_TCP_HEADER, priority); 35691da177e4SLinus Torvalds if (!skb) { 35704e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 35711da177e4SLinus Torvalds return; 35721da177e4SLinus Torvalds } 35731da177e4SLinus Torvalds 35741da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits. */ 35751da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 3576e870a8efSIlpo Järvinen tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 3577a3433f35SChangli Gao TCPHDR_ACK | TCPHDR_RST); 35789a568de4SEric Dumazet tcp_mstamp_refresh(tcp_sk(sk)); 35791da177e4SLinus Torvalds /* Send it off. */ 3580dfb4b9dcSDavid S. Miller if (tcp_transmit_skb(sk, skb, 0, priority)) 35814e673444SPavel Emelyanov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3582c24b14c4SSong Liu 3583c24b14c4SSong Liu /* skb of trace_tcp_send_reset() keeps the skb that caused RST, 3584c24b14c4SSong Liu * skb here is different to the troublesome skb, so use NULL 3585c24b14c4SSong Liu */ 3586c24b14c4SSong Liu trace_tcp_send_reset(sk, NULL); 35871da177e4SLinus Torvalds } 35881da177e4SLinus Torvalds 358967edfef7SAndi Kleen /* Send a crossed SYN-ACK during socket establishment. 359067edfef7SAndi Kleen * WARNING: This routine must only be called when we have already sent 35911da177e4SLinus Torvalds * a SYN packet that crossed the incoming SYN that caused this routine 35921da177e4SLinus Torvalds * to get called. If this assumption fails then the initial rcv_wnd 35931da177e4SLinus Torvalds * and rcv_wscale values will not be correct. 
35941da177e4SLinus Torvalds */ 35951da177e4SLinus Torvalds int tcp_send_synack(struct sock *sk) 35961da177e4SLinus Torvalds { 35971da177e4SLinus Torvalds struct sk_buff *skb; 35981da177e4SLinus Torvalds 359975c119afSEric Dumazet skb = tcp_rtx_queue_head(sk); 360051456b29SIan Morris if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 360175c119afSEric Dumazet pr_err("%s: wrong queue state\n", __func__); 36021da177e4SLinus Torvalds return -EFAULT; 36031da177e4SLinus Torvalds } 36044de075e0SEric Dumazet if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 36051da177e4SLinus Torvalds if (skb_cloned(skb)) { 3606e2080072SEric Dumazet struct sk_buff *nskb; 3607e2080072SEric Dumazet 3608e2080072SEric Dumazet tcp_skb_tsorted_save(skb) { 3609e2080072SEric Dumazet nskb = skb_copy(skb, GFP_ATOMIC); 3610e2080072SEric Dumazet } tcp_skb_tsorted_restore(skb); 361151456b29SIan Morris if (!nskb) 36121da177e4SLinus Torvalds return -ENOMEM; 3613e2080072SEric Dumazet INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); 36142bec445fSEric Dumazet tcp_highest_sack_replace(sk, skb, nskb); 361575c119afSEric Dumazet tcp_rtx_queue_unlink_and_free(skb, sk); 3616f4a775d1SEric Dumazet __skb_header_release(nskb); 361775c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); 3618ab4e846aSEric Dumazet sk_wmem_queued_add(sk, nskb->truesize); 36193ab224beSHideo Aoki sk_mem_charge(sk, nskb->truesize); 36201da177e4SLinus Torvalds skb = nskb; 36211da177e4SLinus Torvalds } 36221da177e4SLinus Torvalds 36234de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 3624735d3831SFlorian Westphal tcp_ecn_send_synack(sk, skb); 36251da177e4SLinus Torvalds } 3626dfb4b9dcSDavid S. Miller return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 36271da177e4SLinus Torvalds } 36281da177e4SLinus Torvalds 36294aea39c1SEric Dumazet /** 3630331fca43SMartin KaFai Lau * tcp_make_synack - Allocate one skb and build a SYNACK packet. 3631331fca43SMartin KaFai Lau * @sk: listener socket 3632331fca43SMartin KaFai Lau * @dst: dst entry attached to the SYNACK. It is consumed and caller 3633331fca43SMartin KaFai Lau * should not use it again. 3634331fca43SMartin KaFai Lau * @req: request_sock pointer 3635331fca43SMartin KaFai Lau * @foc: cookie for tcp fast open 3636331fca43SMartin KaFai Lau * @synack_type: Type of synack to prepare 3637331fca43SMartin KaFai Lau * @syn_skb: SYN packet just received. It could be NULL for rtx case. 
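 * Typical use is a sketch like the IPv4 path (cf. tcp_v4_send_synack();
 * argument details elided here):
 *
 *	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
 *	if (skb) {
 *		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 *		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 *					    ireq->ir_rmt_addr, ...);
 *	}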
36384aea39c1SEric Dumazet */ 36395d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 3640e6b4d113SWilliam Allen Simpson struct request_sock *req, 3641ca6fb065SEric Dumazet struct tcp_fastopen_cookie *foc, 3642331fca43SMartin KaFai Lau enum tcp_synack_type synack_type, 3643331fca43SMartin KaFai Lau struct sk_buff *syn_skb) 36441da177e4SLinus Torvalds { 36452e6599cbSArnaldo Carvalho de Melo struct inet_request_sock *ireq = inet_rsk(req); 36465d062de7SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 364780f03e27SEric Dumazet struct tcp_md5sig_key *md5 = NULL; 36485d062de7SEric Dumazet struct tcp_out_options opts; 36491e03d32bSDmitry Safonov struct tcp_key key = {}; 36505d062de7SEric Dumazet struct sk_buff *skb; 3651bd0388aeSWilliam Allen Simpson int tcp_header_size; 36525d062de7SEric Dumazet struct tcphdr *th; 3653f5fff5dcSTom Quetchenbach int mss; 3654a842fe14SEric Dumazet u64 now; 36551da177e4SLinus Torvalds 3656ca6fb065SEric Dumazet skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 36574aea39c1SEric Dumazet if (unlikely(!skb)) { 36584aea39c1SEric Dumazet dst_release(dst); 36591da177e4SLinus Torvalds return NULL; 36604aea39c1SEric Dumazet } 36611da177e4SLinus Torvalds /* Reserve space for headers. */ 36621da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 36631da177e4SLinus Torvalds 3664b3d05147SEric Dumazet switch (synack_type) { 3665b3d05147SEric Dumazet case TCP_SYNACK_NORMAL: 36669e17f8a4SEric Dumazet skb_set_owner_w(skb, req_to_sk(req)); 3667b3d05147SEric Dumazet break; 3668b3d05147SEric Dumazet case TCP_SYNACK_COOKIE: 3669b3d05147SEric Dumazet /* Under synflood, we do not attach skb to a socket, 3670b3d05147SEric Dumazet * to avoid false sharing. 3671b3d05147SEric Dumazet */ 3672b3d05147SEric Dumazet break; 3673b3d05147SEric Dumazet case TCP_SYNACK_FASTOPEN: 3674ca6fb065SEric Dumazet /* sk is a const pointer, because we want to express that multiple 3675ca6fb065SEric Dumazet * cpus might call us concurrently. 3676ca6fb065SEric Dumazet * sk->sk_wmem_alloc is an atomic, so we can safely promote it to rw.
3677ca6fb065SEric Dumazet */ 3678ca6fb065SEric Dumazet skb_set_owner_w(skb, (struct sock *)sk); 3679b3d05147SEric Dumazet break; 3680ca6fb065SEric Dumazet } 36814aea39c1SEric Dumazet skb_dst_set(skb, dst); 36821da177e4SLinus Torvalds 36833541f9e8SEric Dumazet mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); 3684f5fff5dcSTom Quetchenbach 368533ad798cSAdam Langley memset(&opts, 0, sizeof(opts)); 3686614e8316SEric Dumazet if (tcp_rsk(req)->req_usec_ts < 0) 3687614e8316SEric Dumazet tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst); 3688a842fe14SEric Dumazet now = tcp_clock_ns(); 36898b5f12d0SFlorian Westphal #ifdef CONFIG_SYN_COOKIES 3690f8ace8d9SFlorian Westphal if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) 3691a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, cookie_init_timestamp(req, now), 3692a1ac9c8aSMartin KaFai Lau true); 36938b5f12d0SFlorian Westphal else 36948b5f12d0SFlorian Westphal #endif 36959e450c1eSYuchung Cheng { 3696a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, now, true); 36979e450c1eSYuchung Cheng if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ 36989e450c1eSYuchung Cheng tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); 36999e450c1eSYuchung Cheng } 370080f03e27SEric Dumazet 370180f03e27SEric Dumazet #ifdef CONFIG_TCP_MD5SIG 370280f03e27SEric Dumazet rcu_read_lock(); 3703fd3a154aSEric Dumazet md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); 37041e03d32bSDmitry Safonov if (md5) 37051e03d32bSDmitry Safonov key.type = TCP_KEY_MD5; 370680f03e27SEric Dumazet #endif 37075e526552SEric Dumazet skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); 3708331fca43SMartin KaFai Lau /* bpf program will be interested in the tcp_flags */ 3709331fca43SMartin KaFai Lau TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; 371060e2a778SUrsula Braun tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, 3711331fca43SMartin KaFai Lau foc, synack_type, 3712331fca43SMartin KaFai Lau syn_skb) + sizeof(*th); 371333ad798cSAdam Langley 3714aa8223c7SArnaldo Carvalho de Melo skb_push(skb, tcp_header_size); 3715aa8223c7SArnaldo Carvalho de Melo skb_reset_transport_header(skb); 37161da177e4SLinus Torvalds 3717ea1627c2SEric Dumazet th = (struct tcphdr *)skb->data; 37181da177e4SLinus Torvalds memset(th, 0, sizeof(struct tcphdr)); 37191da177e4SLinus Torvalds th->syn = 1; 37201da177e4SLinus Torvalds th->ack = 1; 37216ac705b1SEric Dumazet tcp_ecn_make_synack(req, th); 3722b44084c2SEric Dumazet th->source = htons(ireq->ir_num); 3723634fb979SEric Dumazet th->dest = ireq->ir_rmt_port; 3724e05a90ecSJamal Hadi Salim skb->mark = ireq->ir_mark; 37253b117750SEric Dumazet skb->ip_summed = CHECKSUM_PARTIAL; 37263b117750SEric Dumazet th->seq = htonl(tcp_rsk(req)->snt_isn); 37278336886fSJerry Chu /* XXX data is queued and acked as is. No buffer/window check */ 37288336886fSJerry Chu th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 37291da177e4SLinus Torvalds 37301da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. 
*/ 3731ed53d0abSEric Dumazet th->window = htons(min(req->rsk_rcv_wnd, 65535U)); 37321e03d32bSDmitry Safonov tcp_options_write(th, NULL, &opts, &key); 37331da177e4SLinus Torvalds th->doff = (tcp_header_size >> 2); 3734bced3f7dSBreno Leitao TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 3735cfb6eeb4SYOSHIFUJI Hideaki 3736cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3737cfb6eeb4SYOSHIFUJI Hideaki /* Okay, we have all we need - do the md5 hash if needed */ 373880f03e27SEric Dumazet if (md5) 3739bd0388aeSWilliam Allen Simpson tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 374039f8e58eSEric Dumazet md5, req_to_sk(req), skb); 374180f03e27SEric Dumazet rcu_read_unlock(); 3742cfb6eeb4SYOSHIFUJI Hideaki #endif 3743cfb6eeb4SYOSHIFUJI Hideaki 3744331fca43SMartin KaFai Lau bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, 3745331fca43SMartin KaFai Lau synack_type, &opts); 3746331fca43SMartin KaFai Lau 3747a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(skb, now, true); 3748a842fe14SEric Dumazet tcp_add_tx_delay(skb, tp); 3749a842fe14SEric Dumazet 37501da177e4SLinus Torvalds return skb; 37511da177e4SLinus Torvalds } 37524bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_make_synack); 37531da177e4SLinus Torvalds 375481164413SDaniel Borkmann static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) 375581164413SDaniel Borkmann { 375681164413SDaniel Borkmann struct inet_connection_sock *icsk = inet_csk(sk); 375781164413SDaniel Borkmann const struct tcp_congestion_ops *ca; 375881164413SDaniel Borkmann u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); 375981164413SDaniel Borkmann 376081164413SDaniel Borkmann if (ca_key == TCP_CA_UNSPEC) 376181164413SDaniel Borkmann return; 376281164413SDaniel Borkmann 376381164413SDaniel Borkmann rcu_read_lock(); 376481164413SDaniel Borkmann ca = tcp_ca_find_key(ca_key); 37650baf26b0SMartin KaFai Lau if (likely(ca && bpf_try_module_get(ca, ca->owner))) { 37660baf26b0SMartin KaFai Lau bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); 376781164413SDaniel Borkmann icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); 376881164413SDaniel Borkmann icsk->icsk_ca_ops = ca; 376981164413SDaniel Borkmann } 377081164413SDaniel Borkmann rcu_read_unlock(); 377181164413SDaniel Borkmann } 377281164413SDaniel Borkmann 377367edfef7SAndi Kleen /* Do all connect socket setups that can be done AF independent. */ 3774f7e56a76Sstephen hemminger static void tcp_connect_init(struct sock *sk) 37751da177e4SLinus Torvalds { 3776cf533ea5SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 37771da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 37781da177e4SLinus Torvalds __u8 rcv_wscale; 377913d3b1ebSLawrence Brakmo u32 rcv_wnd; 37801da177e4SLinus Torvalds 37811da177e4SLinus Torvalds /* We'll fix this up when we get a response from the other end. 37821da177e4SLinus Torvalds * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 
37831da177e4SLinus Torvalds */ 37845d2ed052SEric Dumazet tp->tcp_header_len = sizeof(struct tcphdr); 37853666f666SKuniyuki Iwashima if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) 37865d2ed052SEric Dumazet tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; 37871da177e4SLinus Torvalds 37887c2ffaf2SDmitry Safonov tcp_ao_connect_init(sk); 37897c2ffaf2SDmitry Safonov 37901da177e4SLinus Torvalds /* If the user set TCP_MAXSEG, record it to clamp the MSS */ 37911da177e4SLinus Torvalds if (tp->rx_opt.user_mss) 37921da177e4SLinus Torvalds tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 37931da177e4SLinus Torvalds tp->max_window = 0; 37945d424d5aSJohn Heffner tcp_mtup_init(sk); 37951da177e4SLinus Torvalds tcp_sync_mss(sk, dst_mtu(dst)); 37961da177e4SLinus Torvalds 379781164413SDaniel Borkmann tcp_ca_dst_init(sk, dst); 379881164413SDaniel Borkmann 37991da177e4SLinus Torvalds if (!tp->window_clamp) 38001da177e4SLinus Torvalds tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 38013541f9e8SEric Dumazet tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); 3802f5fff5dcSTom Quetchenbach 38031da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 38041da177e4SLinus Torvalds 3805e88c64f0SHagen Paul Pfeifer /* limit the window selection if the user enforces a smaller rx buffer */ 3806e88c64f0SHagen Paul Pfeifer if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 3807e88c64f0SHagen Paul Pfeifer (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 3808e88c64f0SHagen Paul Pfeifer tp->window_clamp = tcp_full_space(sk); 3809e88c64f0SHagen Paul Pfeifer 381013d3b1ebSLawrence Brakmo rcv_wnd = tcp_rwnd_init_bpf(sk); 381113d3b1ebSLawrence Brakmo if (rcv_wnd == 0) 381213d3b1ebSLawrence Brakmo rcv_wnd = dst_metric(dst, RTAX_INITRWND); 381313d3b1ebSLawrence Brakmo 3814ceef9ab6SEric Dumazet tcp_select_initial_window(sk, tcp_full_space(sk), 38151da177e4SLinus Torvalds tp->advmss - (tp->rx_opt.ts_recent_stamp ?
tp->tcp_header_len - sizeof(struct tcphdr) : 0), 38161da177e4SLinus Torvalds &tp->rcv_wnd, 38171da177e4SLinus Torvalds &tp->window_clamp, 38183666f666SKuniyuki Iwashima READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), 381931d12926Slaurent chavey &rcv_wscale, 382013d3b1ebSLawrence Brakmo rcv_wnd); 38211da177e4SLinus Torvalds 38221da177e4SLinus Torvalds tp->rx_opt.rcv_wscale = rcv_wscale; 38231da177e4SLinus Torvalds tp->rcv_ssthresh = tp->rcv_wnd; 38241da177e4SLinus Torvalds 3825e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, 0); 38261da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 38271da177e4SLinus Torvalds tp->snd_wnd = 0; 3828ee7537b6SHantzis Fotis tcp_init_wl(tp, 0); 38297f582b24SEric Dumazet tcp_write_queue_purge(sk); 38301da177e4SLinus Torvalds tp->snd_una = tp->write_seq; 38311da177e4SLinus Torvalds tp->snd_sml = tp->write_seq; 383233f5f57eSIlpo Järvinen tp->snd_up = tp->write_seq; 3833e0d694d6SEric Dumazet WRITE_ONCE(tp->snd_nxt, tp->write_seq); 3834ee995283SPavel Emelyanov 3835ee995283SPavel Emelyanov if (likely(!tp->repair)) 38361da177e4SLinus Torvalds tp->rcv_nxt = 0; 3837c7781a6eSAndrew Vagin else 383870eabf0eSEric Dumazet tp->rcv_tstamp = tcp_jiffies32; 3839ee995283SPavel Emelyanov tp->rcv_wup = tp->rcv_nxt; 38407db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 38411da177e4SLinus Torvalds 38428550f328SLawrence Brakmo inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); 3843463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 38441da177e4SLinus Torvalds tcp_clear_retrans(tp); 38451da177e4SLinus Torvalds } 38461da177e4SLinus Torvalds 3847783237e8SYuchung Cheng static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 3848783237e8SYuchung Cheng { 3849783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 3850783237e8SYuchung Cheng struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 3851783237e8SYuchung Cheng 3852783237e8SYuchung Cheng tcb->end_seq += skb->len; 3853f4a775d1SEric Dumazet __skb_header_release(skb); 3854ab4e846aSEric Dumazet sk_wmem_queued_add(sk, skb->truesize); 3855783237e8SYuchung Cheng sk_mem_charge(sk, skb->truesize); 38560f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tcb->end_seq); 3857783237e8SYuchung Cheng tp->packets_out += tcp_skb_pcount(skb); 3858783237e8SYuchung Cheng } 3859783237e8SYuchung Cheng 3860783237e8SYuchung Cheng /* Build and send a SYN with data and (cached) Fast Open cookie. However, 3861783237e8SYuchung Cheng * queue a data-only packet after the regular SYN, such that regular SYNs 3862783237e8SYuchung Cheng * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges 3863783237e8SYuchung Cheng * only the SYN sequence, the data are retransmitted in the first ACK. 3864783237e8SYuchung Cheng * If the cookie is not cached or another error occurs, fall back to sending 3865783237e8SYuchung Cheng * a regular SYN with a Fast Open cookie request option.
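 * From user space this path is reached roughly like this (illustrative
 * sketch, error handling omitted; daddr is a filled-in sockaddr_in):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * or via setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, ...) followed
 * by connect() and write(); either way tp->fastopen_req is set and
 * tcp_connect() ends up in this function.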
3866783237e8SYuchung Cheng */ 3867783237e8SYuchung Cheng static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 3868783237e8SYuchung Cheng { 3869ed0c99dcSJakub Kicinski struct inet_connection_sock *icsk = inet_csk(sk); 3870783237e8SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 3871783237e8SYuchung Cheng struct tcp_fastopen_request *fo = tp->fastopen_req; 3872fbf93406SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 3873355a901eSEric Dumazet struct sk_buff *syn_data; 3874fbf93406SEric Dumazet int space, err = 0; 3875783237e8SYuchung Cheng 387667da22d2SYuchung Cheng tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 3877065263f4SWei Wang if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) 3878783237e8SYuchung Cheng goto fallback; 3879783237e8SYuchung Cheng 3880783237e8SYuchung Cheng /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 3881783237e8SYuchung Cheng * user-MSS. Reserve maximum option space for middleboxes that add 3882783237e8SYuchung Cheng * private TCP options. The cost is reduced data space in SYN :( 3883783237e8SYuchung Cheng */ 38843541f9e8SEric Dumazet tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); 3885ed0c99dcSJakub Kicinski /* Sync mss_cache after updating the mss_clamp */ 3886ed0c99dcSJakub Kicinski tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 38873541f9e8SEric Dumazet 3888ed0c99dcSJakub Kicinski space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - 3889783237e8SYuchung Cheng MAX_TCP_OPTION_SPACE; 3890783237e8SYuchung Cheng 3891f5ddcbbbSEric Dumazet space = min_t(size_t, space, fo->size); 3892f5ddcbbbSEric Dumazet 3893fbf93406SEric Dumazet if (space && 3894fbf93406SEric Dumazet !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE), 3895fbf93406SEric Dumazet pfrag, sk->sk_allocation)) 3896fbf93406SEric Dumazet goto fallback; 38975882efffSEric Dumazet syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); 3898355a901eSEric Dumazet if (!syn_data) 3899783237e8SYuchung Cheng goto fallback; 3900355a901eSEric Dumazet memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); 390107e100f9SEric Dumazet if (space) { 3902fbf93406SEric Dumazet space = min_t(size_t, space, pfrag->size - pfrag->offset); 3903fbf93406SEric Dumazet space = tcp_wmem_schedule(sk, space); 3904fbf93406SEric Dumazet } 3905fbf93406SEric Dumazet if (space) { 3906fbf93406SEric Dumazet space = copy_page_from_iter(pfrag->page, pfrag->offset, 3907fbf93406SEric Dumazet space, &fo->data->msg_iter); 3908fbf93406SEric Dumazet if (unlikely(!space)) { 3909ba233b34SEric Dumazet tcp_skb_tsorted_anchor_cleanup(syn_data); 3910355a901eSEric Dumazet kfree_skb(syn_data); 3911783237e8SYuchung Cheng goto fallback; 3912783237e8SYuchung Cheng } 3913fbf93406SEric Dumazet skb_fill_page_desc(syn_data, 0, pfrag->page, 3914fbf93406SEric Dumazet pfrag->offset, space); 3915fbf93406SEric Dumazet page_ref_inc(pfrag->page); 3916fbf93406SEric Dumazet pfrag->offset += space; 3917fbf93406SEric Dumazet skb_len_add(syn_data, space); 3918f859a448SWillem de Bruijn skb_zcopy_set(syn_data, fo->uarg, NULL); 391907e100f9SEric Dumazet } 3920355a901eSEric Dumazet /* No more data pending in inet_wait_for_connect() */ 3921355a901eSEric Dumazet if (space == fo->size) 3922355a901eSEric Dumazet fo->data = NULL; 3923355a901eSEric Dumazet fo->copied = space; 3924783237e8SYuchung Cheng 3925355a901eSEric Dumazet tcp_connect_queue_skb(sk, syn_data); 39260f87230dSFrancis Yan if (syn_data->len) 39270f87230dSFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_BUSY); 3928355a901eSEric Dumazet 
3929355a901eSEric Dumazet err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); 3930355a901eSEric Dumazet 3931a1ac9c8aSMartin KaFai Lau skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); 3932355a901eSEric Dumazet 3933355a901eSEric Dumazet /* Now full SYN+DATA was cloned and sent (or not), 3934355a901eSEric Dumazet * remove the SYN from the original skb (syn_data) 3935355a901eSEric Dumazet * we keep in write queue in case of a retransmit, as we 3936355a901eSEric Dumazet * also have the SYN packet (with no data) in the same queue. 3937431a9124SEric Dumazet */ 3938355a901eSEric Dumazet TCP_SKB_CB(syn_data)->seq++; 3939355a901eSEric Dumazet TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; 3940355a901eSEric Dumazet if (!err) { 394167da22d2SYuchung Cheng tp->syn_data = (fo->copied > 0); 394275c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); 3943f19c29e3SYuchung Cheng NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); 3944783237e8SYuchung Cheng goto done; 3945783237e8SYuchung Cheng } 3946783237e8SYuchung Cheng 394775c119afSEric Dumazet /* data was not sent, put it in write_queue */ 394875c119afSEric Dumazet __skb_queue_tail(&sk->sk_write_queue, syn_data); 3949b5b7db8dSEric Dumazet tp->packets_out -= tcp_skb_pcount(syn_data); 3950b5b7db8dSEric Dumazet 3951783237e8SYuchung Cheng fallback: 3952783237e8SYuchung Cheng /* Send a regular SYN with Fast Open cookie request option */ 3953783237e8SYuchung Cheng if (fo->cookie.len > 0) 3954783237e8SYuchung Cheng fo->cookie.len = 0; 3955783237e8SYuchung Cheng err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 3956783237e8SYuchung Cheng if (err) 3957783237e8SYuchung Cheng tp->syn_fastopen = 0; 3958783237e8SYuchung Cheng done: 3959783237e8SYuchung Cheng fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 3960783237e8SYuchung Cheng return err; 3961783237e8SYuchung Cheng } 3962783237e8SYuchung Cheng 396367edfef7SAndi Kleen /* Build a SYN and send it off. */ 39641da177e4SLinus Torvalds int tcp_connect(struct sock *sk) 39651da177e4SLinus Torvalds { 39661da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 39671da177e4SLinus Torvalds struct sk_buff *buff; 3968ee586811SEric Paris int err; 39691da177e4SLinus Torvalds 3970de525be2SLawrence Brakmo tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL); 39718ba60924SEric Dumazet 39720aadc739SDmitry Safonov #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO) 39730aadc739SDmitry Safonov /* Has to be checked late, after setting daddr/saddr/ops. 39740aadc739SDmitry Safonov * Return error if the peer has both a md5 and a tcp-ao key 39750aadc739SDmitry Safonov * configured as this is ambiguous. 39760aadc739SDmitry Safonov */ 39770aadc739SDmitry Safonov if (unlikely(rcu_dereference_protected(tp->md5sig_info, 39780aadc739SDmitry Safonov lockdep_sock_is_held(sk)))) { 39790aadc739SDmitry Safonov bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); 39800aadc739SDmitry Safonov bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); 39810aadc739SDmitry Safonov struct tcp_ao_info *ao_info; 39820aadc739SDmitry Safonov 39830aadc739SDmitry Safonov ao_info = rcu_dereference_check(tp->ao_info, 39840aadc739SDmitry Safonov lockdep_sock_is_held(sk)); 39850aadc739SDmitry Safonov if (ao_info) { 39860aadc739SDmitry Safonov /* This is an extra check: tcp_ao_required() in 39870aadc739SDmitry Safonov * tcp_v{4,6}_parse_md5_keys() should prevent adding 39880aadc739SDmitry Safonov * md5 keys on ao_required socket. 
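 * For context, the ambiguous configuration rejected below is a socket
 * that, before connect(), did both of (sketch; struct contents omitted):
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));
 *	setsockopt(fd, IPPROTO_TCP, TCP_AO_ADD_KEY, &ao, sizeof(ao));
 *
 * with both keys matching the same peer; connect() then fails with
 * -EKEYREJECTED instead of guessing which signing scheme to use.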
39890aadc739SDmitry Safonov */ 39900aadc739SDmitry Safonov needs_ao |= ao_info->ao_required; 39910aadc739SDmitry Safonov WARN_ON_ONCE(ao_info->ao_required && needs_md5); 39920aadc739SDmitry Safonov } 39930aadc739SDmitry Safonov if (needs_md5 && needs_ao) 39940aadc739SDmitry Safonov return -EKEYREJECTED; 39950aadc739SDmitry Safonov 39960aadc739SDmitry Safonov /* If we have a matching md5 key and no matching tcp-ao key 39970aadc739SDmitry Safonov * then free up ao_info if allocated. 39980aadc739SDmitry Safonov */ 39990aadc739SDmitry Safonov if (needs_md5) { 4000*decde258SDmitry Safonov tcp_ao_destroy_sock(sk, false); 40010aadc739SDmitry Safonov } else if (needs_ao) { 40020aadc739SDmitry Safonov tcp_clear_md5_list(sk); 40030aadc739SDmitry Safonov kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 40040aadc739SDmitry Safonov lockdep_sock_is_held(sk))); 40050aadc739SDmitry Safonov } 40060aadc739SDmitry Safonov } 40070aadc739SDmitry Safonov #endif 40080aadc739SDmitry Safonov #ifdef CONFIG_TCP_AO 40090aadc739SDmitry Safonov if (unlikely(rcu_dereference_protected(tp->ao_info, 40100aadc739SDmitry Safonov lockdep_sock_is_held(sk)))) { 40110aadc739SDmitry Safonov /* Don't allow connecting if ao is configured but no 40120aadc739SDmitry Safonov * matching key is found. 40130aadc739SDmitry Safonov */ 40140aadc739SDmitry Safonov if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) 40150aadc739SDmitry Safonov return -EKEYREJECTED; 40160aadc739SDmitry Safonov } 40170aadc739SDmitry Safonov #endif 40180aadc739SDmitry Safonov 40198ba60924SEric Dumazet if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 40208ba60924SEric Dumazet return -EHOSTUNREACH; /* Routing failure or similar. */ 40218ba60924SEric Dumazet 40221da177e4SLinus Torvalds tcp_connect_init(sk); 40231da177e4SLinus Torvalds 40242b916477SAndrey Vagin if (unlikely(tp->repair)) { 40252b916477SAndrey Vagin tcp_finish_connect(sk, NULL); 40262b916477SAndrey Vagin return 0; 40272b916477SAndrey Vagin } 40282b916477SAndrey Vagin 40295882efffSEric Dumazet buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); 4030355a901eSEric Dumazet if (unlikely(!buff)) 40311da177e4SLinus Torvalds return -ENOBUFS; 40321da177e4SLinus Torvalds 4033a3433f35SChangli Gao tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 40349a568de4SEric Dumazet tcp_mstamp_refresh(tp); 40359d0c00f5SEric Dumazet tp->retrans_stamp = tcp_time_stamp_ts(tp); 4036783237e8SYuchung Cheng tcp_connect_queue_skb(sk, buff); 4037735d3831SFlorian Westphal tcp_ecn_send_syn(sk, buff); 403875c119afSEric Dumazet tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); 40391da177e4SLinus Torvalds 4040783237e8SYuchung Cheng /* Send off SYN; include data in Fast Open. */ 4041783237e8SYuchung Cheng err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 4042783237e8SYuchung Cheng tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 4043ee586811SEric Paris if (err == -ECONNREFUSED) 4044ee586811SEric Paris return err; 4045bd37a088SWei Yongjun 4046bd37a088SWei Yongjun /* We change tp->snd_nxt after the tcp_transmit_skb() call 4047bd37a088SWei Yongjun * in order to make this packet get counted in tcpOutSegs. 
4048bd37a088SWei Yongjun */ 4049e0d694d6SEric Dumazet WRITE_ONCE(tp->snd_nxt, tp->write_seq); 4050bd37a088SWei Yongjun tp->pushed_seq = tp->write_seq; 4051b5b7db8dSEric Dumazet buff = tcp_send_head(sk); 4052b5b7db8dSEric Dumazet if (unlikely(buff)) { 4053e0d694d6SEric Dumazet WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); 4054b5b7db8dSEric Dumazet tp->pushed_seq = TCP_SKB_CB(buff)->seq; 4055b5b7db8dSEric Dumazet } 405681cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 40571da177e4SLinus Torvalds 40581da177e4SLinus Torvalds /* Timer for repeating the SYN until an answer. */ 40593f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 40603f421baaSArnaldo Carvalho de Melo inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 40611da177e4SLinus Torvalds return 0; 40621da177e4SLinus Torvalds } 40634bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_connect); 40641da177e4SLinus Torvalds 4065bbf80d71SEric Dumazet u32 tcp_delack_max(const struct sock *sk) 4066bbf80d71SEric Dumazet { 4067bbf80d71SEric Dumazet const struct dst_entry *dst = __sk_dst_get(sk); 4068bbf80d71SEric Dumazet u32 delack_max = inet_csk(sk)->icsk_delack_max; 4069bbf80d71SEric Dumazet 4070bbf80d71SEric Dumazet if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) { 4071bbf80d71SEric Dumazet u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); 4072bbf80d71SEric Dumazet u32 delack_from_rto_min = max_t(int, 1, rto_min - 1); 4073bbf80d71SEric Dumazet 4074bbf80d71SEric Dumazet delack_max = min_t(u32, delack_max, delack_from_rto_min); 4075bbf80d71SEric Dumazet } 4076bbf80d71SEric Dumazet return delack_max; 4077bbf80d71SEric Dumazet } 4078bbf80d71SEric Dumazet 40791da177e4SLinus Torvalds /* Send out a delayed ack, the caller does the policy checking 40801da177e4SLinus Torvalds * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 40811da177e4SLinus Torvalds * for details. 40821da177e4SLinus Torvalds */ 40831da177e4SLinus Torvalds void tcp_send_delayed_ack(struct sock *sk) 40841da177e4SLinus Torvalds { 4085463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 4086463c84b9SArnaldo Carvalho de Melo int ato = icsk->icsk_ack.ato; 40871da177e4SLinus Torvalds unsigned long timeout; 40881da177e4SLinus Torvalds 40891da177e4SLinus Torvalds if (ato > TCP_DELACK_MIN) { 4090463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 40911da177e4SLinus Torvalds int max_ato = HZ / 2; 40921da177e4SLinus Torvalds 409331954cd8SWei Wang if (inet_csk_in_pingpong_mode(sk) || 4094056834d9SIlpo Järvinen (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 40951da177e4SLinus Torvalds max_ato = TCP_DELACK_MAX; 40961da177e4SLinus Torvalds 40971da177e4SLinus Torvalds /* Slow path, intersegment interval is "high". */ 40981da177e4SLinus Torvalds 40991da177e4SLinus Torvalds /* If some rtt estimate is known, use it to bound delayed ack. 4100463c84b9SArnaldo Carvalho de Melo * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 41011da177e4SLinus Torvalds * directly. 
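 * Worked example (illustrative numbers): tp->srtt_us stores 8 times the
 * smoothed RTT in usec, so srtt_us == 80000 means a 10 ms RTT and the
 * bound below is max(usecs_to_jiffies(10000), TCP_DELACK_MIN), i.e. on
 * fast paths ato is clamped to roughly one RTT rather than max_ato.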
41021da177e4SLinus Torvalds */ 4103740b0f18SEric Dumazet if (tp->srtt_us) { 4104740b0f18SEric Dumazet int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), 4105740b0f18SEric Dumazet TCP_DELACK_MIN); 41061da177e4SLinus Torvalds 41071da177e4SLinus Torvalds if (rtt < max_ato) 41081da177e4SLinus Torvalds max_ato = rtt; 41091da177e4SLinus Torvalds } 41101da177e4SLinus Torvalds 41111da177e4SLinus Torvalds ato = min(ato, max_ato); 41121da177e4SLinus Torvalds } 41131da177e4SLinus Torvalds 4114bbf80d71SEric Dumazet ato = min_t(u32, ato, tcp_delack_max(sk)); 41152b8ee4f0SMartin KaFai Lau 41161da177e4SLinus Torvalds /* Stay within the limit we were given */ 41171da177e4SLinus Torvalds timeout = jiffies + ato; 41181da177e4SLinus Torvalds 41191da177e4SLinus Torvalds /* Use new timeout only if there wasn't an older one earlier. */ 4120463c84b9SArnaldo Carvalho de Melo if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 4121b6b6d653SEric Dumazet /* If delack timer is about to expire, send ACK now. */ 4122b6b6d653SEric Dumazet if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 41231da177e4SLinus Torvalds tcp_send_ack(sk); 41241da177e4SLinus Torvalds return; 41251da177e4SLinus Torvalds } 41261da177e4SLinus Torvalds 4127463c84b9SArnaldo Carvalho de Melo if (!time_before(timeout, icsk->icsk_ack.timeout)) 4128463c84b9SArnaldo Carvalho de Melo timeout = icsk->icsk_ack.timeout; 41291da177e4SLinus Torvalds } 4130463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 4131463c84b9SArnaldo Carvalho de Melo icsk->icsk_ack.timeout = timeout; 4132463c84b9SArnaldo Carvalho de Melo sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 41331da177e4SLinus Torvalds } 41341da177e4SLinus Torvalds 41351da177e4SLinus Torvalds /* This routine sends an ack and also updates the window. */ 41362987babbSYuchung Cheng void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) 41371da177e4SLinus Torvalds { 41381da177e4SLinus Torvalds struct sk_buff *buff; 41391da177e4SLinus Torvalds 4140058dc334SIlpo Järvinen /* If we have been reset, we may not send again. */ 4141058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE) 4142058dc334SIlpo Järvinen return; 4143058dc334SIlpo Järvinen 41441da177e4SLinus Torvalds /* We are not putting this on the write queue, so 41451da177e4SLinus Torvalds * tcp_transmit_skb() will set the ownership to this 41461da177e4SLinus Torvalds * sock. 41471da177e4SLinus Torvalds */ 41487450aaf6SEric Dumazet buff = alloc_skb(MAX_TCP_HEADER, 41497450aaf6SEric Dumazet sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 41507450aaf6SEric Dumazet if (unlikely(!buff)) { 4151a37c2134SEric Dumazet struct inet_connection_sock *icsk = inet_csk(sk); 4152a37c2134SEric Dumazet unsigned long delay; 4153a37c2134SEric Dumazet 4154a37c2134SEric Dumazet delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; 4155a37c2134SEric Dumazet if (delay < TCP_RTO_MAX) 4156a37c2134SEric Dumazet icsk->icsk_ack.retry++; 4157463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 4158a37c2134SEric Dumazet icsk->icsk_ack.ato = TCP_ATO_MIN; 4159a37c2134SEric Dumazet inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX); 41601da177e4SLinus Torvalds return; 41611da177e4SLinus Torvalds } 41621da177e4SLinus Torvalds 41631da177e4SLinus Torvalds /* Reserve space for headers and prepare control bits.
*/ 41641da177e4SLinus Torvalds skb_reserve(buff, MAX_TCP_HEADER); 4165a3433f35SChangli Gao tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 41661da177e4SLinus Torvalds 416798781965SEric Dumazet /* We do not want pure acks influencing TCP Small Queues or fq/pacing 416898781965SEric Dumazet * too much. 416998781965SEric Dumazet * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 417098781965SEric Dumazet */ 417198781965SEric Dumazet skb_set_tcp_pure_ack(buff); 417298781965SEric Dumazet 41731da177e4SLinus Torvalds /* Send it off; this clears delayed acks for us. */ 41742987babbSYuchung Cheng __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); 41751da177e4SLinus Torvalds } 417627cde44aSYuchung Cheng EXPORT_SYMBOL_GPL(__tcp_send_ack); 41772987babbSYuchung Cheng 41782987babbSYuchung Cheng void tcp_send_ack(struct sock *sk) 41792987babbSYuchung Cheng { 41802987babbSYuchung Cheng __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); 41811da177e4SLinus Torvalds } 41821da177e4SLinus Torvalds 41831da177e4SLinus Torvalds /* This routine sends a packet with an out of date sequence 41841da177e4SLinus Torvalds * number. It assumes the other end will try to ack it. 41851da177e4SLinus Torvalds * 41861da177e4SLinus Torvalds * Question: what should we do in urgent mode? 41871da177e4SLinus Torvalds * 4.4BSD forces sending a single byte of data. We cannot send 41881da177e4SLinus Torvalds * out of window data, because we have SND.NXT==SND.MAX... 41891da177e4SLinus Torvalds * 41901da177e4SLinus Torvalds * Current solution: to send TWO zero-length segments in urgent mode: 41911da177e4SLinus Torvalds * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 41921da177e4SLinus Torvalds * out-of-date with SND.UNA-1 to probe window. 41931da177e4SLinus Torvalds */ 4194e520af48SEric Dumazet static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) 41951da177e4SLinus Torvalds { 41961da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 41971da177e4SLinus Torvalds struct sk_buff *skb; 41981da177e4SLinus Torvalds 41991da177e4SLinus Torvalds /* We don't queue it, tcp_transmit_skb() sets ownership. */ 42007450aaf6SEric Dumazet skb = alloc_skb(MAX_TCP_HEADER, 42017450aaf6SEric Dumazet sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 420251456b29SIan Morris if (!skb) 42031da177e4SLinus Torvalds return -1; 42041da177e4SLinus Torvalds 42051da177e4SLinus Torvalds /* Reserve space for headers and set control bits. */ 42061da177e4SLinus Torvalds skb_reserve(skb, MAX_TCP_HEADER); 42071da177e4SLinus Torvalds /* Use a previous sequence. This should cause the other 42081da177e4SLinus Torvalds * end to send an ack. Don't queue or clone SKB, just 42091da177e4SLinus Torvalds * send it. 42101da177e4SLinus Torvalds */ 4211a3433f35SChangli Gao tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 4212e2e8009fSRenato Westphal NET_INC_STATS(sock_net(sk), mib); 42137450aaf6SEric Dumazet return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); 42141da177e4SLinus Torvalds } 42151da177e4SLinus Torvalds 4216385e2070SEric Dumazet /* Called from setsockopt( ...
TCP_REPAIR ) */ 4217ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk) 4218ee995283SPavel Emelyanov { 4219ee995283SPavel Emelyanov if (sk->sk_state == TCP_ESTABLISHED) { 4220ee995283SPavel Emelyanov tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 42219a568de4SEric Dumazet tcp_mstamp_refresh(tcp_sk(sk)); 4222e520af48SEric Dumazet tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); 4223ee995283SPavel Emelyanov } 4224ee995283SPavel Emelyanov } 4225ee995283SPavel Emelyanov 422667edfef7SAndi Kleen /* Initiate keepalive or window probe from timer. */ 4227e520af48SEric Dumazet int tcp_write_wakeup(struct sock *sk, int mib) 42281da177e4SLinus Torvalds { 42291da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 42301da177e4SLinus Torvalds struct sk_buff *skb; 42311da177e4SLinus Torvalds 4232058dc334SIlpo Järvinen if (sk->sk_state == TCP_CLOSE) 4233058dc334SIlpo Järvinen return -1; 4234058dc334SIlpo Järvinen 423500db4124SIan Morris skb = tcp_send_head(sk); 423600db4124SIan Morris if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 42371da177e4SLinus Torvalds int err; 42380c54b85fSIlpo Järvinen unsigned int mss = tcp_current_mss(sk); 423990840defSIlpo Järvinen unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 42401da177e4SLinus Torvalds 42411da177e4SLinus Torvalds if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 42421da177e4SLinus Torvalds tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 42431da177e4SLinus Torvalds 42441da177e4SLinus Torvalds /* We are probing the opening of a window 42451da177e4SLinus Torvalds * but the window size is != 0; 42461da177e4SLinus Torvalds * it must have been a result of SWS avoidance (sender) 42471da177e4SLinus Torvalds */ 42481da177e4SLinus Torvalds if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 42491da177e4SLinus Torvalds skb->len > mss) { 42501da177e4SLinus Torvalds seg_size = min(seg_size, mss); 42514de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 425275c119afSEric Dumazet if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, 425375c119afSEric Dumazet skb, seg_size, mss, GFP_ATOMIC)) 42541da177e4SLinus Torvalds return -1; 42551da177e4SLinus Torvalds } else if (!tcp_skb_pcount(skb)) 42565bbb432cSEric Dumazet tcp_set_skb_tso_segs(skb, mss); 42571da177e4SLinus Torvalds 42584de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 4259dfb4b9dcSDavid S. Miller err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 426066f5fe62SIlpo Järvinen if (!err) 426166f5fe62SIlpo Järvinen tcp_event_new_data_sent(sk, skb); 42621da177e4SLinus Torvalds return err; 42631da177e4SLinus Torvalds } else { 426433f5f57eSIlpo Järvinen if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 4265e520af48SEric Dumazet tcp_xmit_probe_skb(sk, 1, mib); 4266e520af48SEric Dumazet return tcp_xmit_probe_skb(sk, 0, mib); 42671da177e4SLinus Torvalds } 42681da177e4SLinus Torvalds } 42691da177e4SLinus Torvalds 42701da177e4SLinus Torvalds /* A window probe timeout has occurred. If the window is not closed, send 42711da177e4SLinus Torvalds * a partial packet, else a zero probe.
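 *
 * Backoff sketch (the simple case, ignoring the user-timeout clamp below):
 * every unanswered probe increments icsk_backoff, so successive probes
 * fire after roughly RTO, 2*RTO, 4*RTO, ... until tcp_probe0_when()
 * caps the interval at TCP_RTO_MAX.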
42721da177e4SLinus Torvalds */ 42731da177e4SLinus Torvalds void tcp_send_probe0(struct sock *sk) 42741da177e4SLinus Torvalds { 4275463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 42761da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 4277c6214a97SNikolay Borisov struct net *net = sock_net(sk); 4278c1d5674fSYuchung Cheng unsigned long timeout; 42791da177e4SLinus Torvalds int err; 42801da177e4SLinus Torvalds 4281e520af48SEric Dumazet err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); 42821da177e4SLinus Torvalds 428375c119afSEric Dumazet if (tp->packets_out || tcp_write_queue_empty(sk)) { 42841da177e4SLinus Torvalds /* Cancel probe timer, if it is not required. */ 42856687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 4286463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 42879d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 42881da177e4SLinus Torvalds return; 42891da177e4SLinus Torvalds } 42901da177e4SLinus Torvalds 4291c1d5674fSYuchung Cheng icsk->icsk_probes_out++; 42921da177e4SLinus Torvalds if (err <= 0) { 429339e24435SKuniyuki Iwashima if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) 4294463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff++; 4295c1d5674fSYuchung Cheng timeout = tcp_probe0_when(sk, TCP_RTO_MAX); 42961da177e4SLinus Torvalds } else { 42971da177e4SLinus Torvalds /* If packet was not sent due to local congestion, 4298c1d5674fSYuchung Cheng * Let senders fight for local resources conservatively. 42991da177e4SLinus Torvalds */ 4300c1d5674fSYuchung Cheng timeout = TCP_RESOURCE_PROBE_INTERVAL; 43011da177e4SLinus Torvalds } 4302344db93aSEnke Chen 4303344db93aSEnke Chen timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); 43048dc242adSEric Dumazet tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX); 43051da177e4SLinus Torvalds } 43065db92c99SOctavian Purdila 4307ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) 43085db92c99SOctavian Purdila { 43095db92c99SOctavian Purdila const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; 43105db92c99SOctavian Purdila struct flowi fl; 43115db92c99SOctavian Purdila int res; 43125db92c99SOctavian Purdila 4313cb6cd2ceSAkhmat Karakotov /* Paired with WRITE_ONCE() in sock_setsockopt() */ 4314cb6cd2ceSAkhmat Karakotov if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) 43155e526552SEric Dumazet WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); 4316331fca43SMartin KaFai Lau res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, 4317331fca43SMartin KaFai Lau NULL); 43185db92c99SOctavian Purdila if (!res) { 43190a375c82SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 43200a375c82SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 4321e9d9da91SEric Dumazet if (unlikely(tcp_passive_fastopen(sk))) { 4322e9d9da91SEric Dumazet /* sk has const attribute because listeners are lockless. 4323e9d9da91SEric Dumazet * However in this case, we are dealing with a passive fastopen 4324e9d9da91SEric Dumazet * socket thus we can change total_retrans value. 4325e9d9da91SEric Dumazet */ 4326e9d9da91SEric Dumazet tcp_sk_rw(sk)->total_retrans++; 4327e9d9da91SEric Dumazet } 4328cf34ce3dSSong Liu trace_tcp_retransmit_synack(sk, req); 43295db92c99SOctavian Purdila } 43305db92c99SOctavian Purdila return res; 43315db92c99SOctavian Purdila } 43325db92c99SOctavian Purdila EXPORT_SYMBOL(tcp_rtx_synack); 4333
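/* Illustrative user-space companion (a sketch, not part of the kernel
 * build): one way to observe the zero window probes generated by
 * tcp_send_probe0() above. Assumptions: loopback, a free port, and a
 * receiver that simply stops reading so its advertised window closes.
 *
 *	// receiver: accept, then sleep without reading
 *	int l = socket(AF_INET, SOCK_STREAM, 0);
 *	bind(l, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(l, 1);
 *	int c = accept(l, NULL, NULL);
 *	sleep(60);	// rcvbuf fills, window drops to zero
 *
 *	// sender: fill the pipe until it would block
 *	int s = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *	while (send(s, buf, sizeof(buf), MSG_DONTWAIT) > 0)
 *		;
 *
 * A packet capture on lo then shows win 0 ACKs from the receiver and
 * periodic window probes from the sender, spaced by the backed-off
 * timer armed in tcp_send_probe0().
 */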