/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim	:	ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn __read_mostly;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

#define IsSackFrto() (sysctl_tcp_frto == 0x2)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make more careful check taking into account,
		 * that SACKs block is variable.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}
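
/* Editor's note (worked example, not part of the original source): with the
 * helpers above, tcp_incr_quickack() budgets roughly one "quick" ACK per two
 * full-sized segments in the advertised window. Assuming, for illustration,
 * rcv_wnd = 65535 and rcv_mss = 1460, quickacks = 65535 / (2 * 1460) = 22,
 * which is then clamped to TCP_MAX_QUICKACKS before being stored in
 * icsk->icsk_ack.quick.
 */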

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK) {
		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* Funny extension: if ECT is not set on a segment,
		 * it is surely retransmit. It is not in ECN RFC,
		 * but Linux follows this rule. */
		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
			tcp_enter_quickack_mode((struct sock *)tp);
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return 1;
	return 0;
}
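
/* Editor's note (summary of the helpers above, hedged): the two negotiation
 * checks follow the RFC 3168 handshake. TCP_ECN_rcv_synack() keeps ECN
 * enabled only when the SYN-ACK carries ECE without CWR, and
 * TCP_ECN_rcv_syn() keeps it only when the incoming SYN carries both ECE and
 * CWR; any other combination clears TCP_ECN_OK for the connection.
 */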

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
		     sizeof(struct sk_buff);

	if (sk->sk_sndbuf < 3 * sndmem)
		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd), and
 * the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but the
 * lower the throughput and the higher the sensitivity of the connection
 * to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at the "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 *   - to enforce header prediction at sender, even when application
 *     requires some significant "application buffer". It is check #1.
 *   - to prevent pruning of receive queue because of misprediction
 *     of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_memory_pressure) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

	/* Try to select rcvbuf so that 4 mss-sized segments
	 * will fit to window and corresponding skbs will fit to our rcvbuf.
	 * (was 3; 4 is minimum to allow fast retransmit to work.)
	 */
	while (tcp_win_from_space(rcvmem) < tp->advmss)
		rcvmem += 128;
	if (sk->sk_rcvbuf < 4 * rcvmem)
		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}
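
/* Editor's note (illustrative, relies on an assumption about the
 * tcp_win_from_space() helper defined outside this file): with the default
 * sysctl_tcp_adv_win_scale of 2 declared above, tcp_win_from_space() reports
 * roughly 3/4 of the buffer space as usable window. tcp_fixup_rcvbuf()
 * therefore keeps adding 128 bytes to rcvmem until about 3/4 of it covers
 * one advertised MSS, and then reserves room for four such segments in
 * sk->sk_rcvbuf, capped by sysctl_tcp_rmem[2].
 */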

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_memory_pressure &&
	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACKing less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MIN_RCVMSS);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
 *
 * More detail on this code can be found at
 * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else if (m < new_sample)
			new_sample = m << 3;
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}
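
/* Editor's note (illustrative summary of the receiver RTT estimator above):
 * without timestamps, tcp_rcv_rtt_measure() arms a new sample each time
 * rcv_nxt passes rcv_rtt_est.seq = rcv_nxt + rcv_wnd, i.e. roughly one
 * measurement per advertised window of received data, and tcp_rcv_rtt_update()
 * keeps the estimate scaled by 8 (m << 3), behaving more like a minimum filter
 * than a smoothed mean in that window-dependent case. With timestamps, every
 * segment of at least rcv_mss bytes yields a sample from the echoed tsecr and
 * the estimate is smoothed with a 1/8 gain instead.
 */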

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);

	if (tp->rcvq_space.space != space) {
		int rcvmem;

		tp->rcvq_space.space = space;

		if (sysctl_tcp_moderate_rcvbuf &&
		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			int new_clamp = space;

			/* Receive space grows, normalize in order to
			 * take into account packet headers and sk_buff
			 * structure overhead.
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = (tp->advmss + MAX_TCP_HEADER +
				  16 + sizeof(struct sk_buff));
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along. */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

static u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst->metrics[RTAX_RTO_MIN - 1];
	return rto_min;
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible.
	 *	m stands for "measurement".
	 *
	 *	In a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase
	 * too slowly when it should be increased quickly, decrease too quickly,
	 * etc.
	 * I guess in BSD RTO takes ONE value, so that it absolutely
	 * does not matter how to _calculate_ it. Seems it was a trap
	 * that VJ failed to avoid. 8)
	 */
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);	/* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);	/* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}
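
/* Editor's note (worked equations for the update above, hedged): srtt is kept
 * scaled by 8 and mdev/rttvar by 4, so the fixed-point code above corresponds
 * to the classic Jacobson/Karels estimator
 *
 *	SRTT  <- 7/8 * SRTT + 1/8 * m
 *	mdev  <- 3/4 * mdev + 1/4 * |m - SRTT|
 *	RTO    = SRTT + 4 * mdev
 *
 * which is what tcp_set_rto() below computes as (tp->srtt >> 3) + tp->rttvar.
 * On the first sample the code seeds srtt = 8m and mdev = 2m, giving the
 * initial "rto = 3*rtt" noted in the comments.
 */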

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with a correct one. That is exactly what we pretend to do.
	 */
}

/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 * guarantees that rto is higher.
 */
static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

/* Save metrics learned by this TCP session.
   This function is called only when TCP finishes successfully,
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_nometrics_save)
		return;

	dst_confirm(dst);

	if (dst && (dst->flags & DST_HOST)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		int m;

		if (icsk->icsk_backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably, no packets returned in time.
			 * Reset our results.
			 */
			if (!(dst_metric_locked(dst, RTAX_RTT)))
				dst->metrics[RTAX_RTT - 1] = 0;
			return;
		}

		m = dst_metric(dst, RTAX_RTT) - tp->srtt;

		/* If newly calculated rtt is larger than the stored one,
		 * store the new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				dst->metrics[RTAX_RTT - 1] = tp->srtt;
			else
				dst->metrics[RTAX_RTT - 1] -= (m >> 3);
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			if (m < 0)
				m = -m;

			/* Scale deviation to rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			if (m >= dst_metric(dst, RTAX_RTTVAR))
				dst->metrics[RTAX_RTTVAR - 1] = m;
			else
				dst->metrics[RTAX_RTTVAR - 1] -=
					(dst->metrics[RTAX_RTTVAR - 1] - m) >> 2;
		}

		if (tp->snd_ssthresh >= 0xFFFF) {
			/* Slow start still did not finish. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH - 1] = tp->snd_cwnd >> 1;
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH - 1] =
					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND - 1] = (dst->metrics[RTAX_CWND - 1] + tp->snd_cwnd) >> 1;
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			   ssthresh may be also invalid.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND - 1] = (dst->metrics[RTAX_CWND - 1] + tp->snd_ssthresh) >> 1;
			if (dst->metrics[RTAX_SSTHRESH - 1] &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH - 1])
				dst->metrics[RTAX_SSTHRESH - 1] = tp->snd_ssthresh;
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst->metrics[RTAX_REORDERING - 1] < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst->metrics[RTAX_REORDERING - 1] = tp->reordering;
		}
	}
}

/* Numbers are taken from RFC3390.
 *
 * John Heffner states:
 *
 *	The RFC specifies a window of no more than 4380 bytes
 *	unless 2*MSS > 4380.  Reading the pseudocode in the RFC
 *	is a bit misleading because they use a clamp at 4380 bytes
 *	rather than use a multiplier in the relevant range.
 */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd) {
		if (tp->mss_cache > 1460)
			cwnd = 2;
		else
			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
	}
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~2;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 4;
}
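
/* Editor's note (assumption about the rx_opt.sack_ok encoding used above):
 * the field appears to be a small bit mask in which bit 1 (value 2) marks
 * FACK, cleared by tcp_disable_fack(), and bit 2 (value 4) records that a
 * D-SACK has been seen, set by tcp_dsack_seen(); the SACK-permitted bit
 * itself is assumed to live in the low bit and is tested elsewhere via
 * tcp_is_sack()/tcp_is_reno()/tcp_is_fack().
 */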

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tcp_disable_fack(tp);
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0)
		goto reset;

	if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than the real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets forces the peer to delay ACKs and the calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, wait for troubles.
	 */
	if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	tcp_set_rto(sk);
	tcp_bound_rto(sk);
	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
		goto reset;
	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* Play conservative. If timestamps are not
	 * supported, TCP will fail to recalculate correct
	 * rtt, if initial rto is too small. FORGET ALL AND RESET!
	 */
	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
		tp->srtt = 0;
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	}
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth to be remembered. 8) */
		if (ts)
			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
		else if (tcp_is_reno(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
		else if (tcp_is_fack(tp))
			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
		else
			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 *  Tag  InFlight	Description
 *  0	 1		- orig segment is in flight.
 *  S	 0		- nothing flies, orig reached receiver.
 *  L	 0		- nothing flies, orig lost by net.
 *  R	 2		- both orig and retransmit are in flight.
 *  L|R	 1		- orig is lost, retransmit is in flight.
 *  S|R	 1		- orig reached receiver, retrans is still in flight.
 *  (L|S|R is logically valid, it could occur when L|R is sacked,
 *   but it is equivalent to plain S and code short-circuits it to S.
 *   L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after never retransmitted
 *	   hole was sent out.
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note, that state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
10295b3c9882SIlpo Järvinen * Note that SND.UNA is not included in the range even though it is valid because
10300e835331SIlpo Järvinen * it means that the receiver is rather inconsistent with itself, reporting
10310e835331SIlpo Järvinen * SACK reneging when it should advance SND.UNA. Such a SACK block is, however,
10320e835331SIlpo Järvinen * perfectly valid in light of RFC2018, which explicitly states
10330e835331SIlpo Järvinen * that "SACK block MUST reflect the newest segment. Even if the newest
10340e835331SIlpo Järvinen * segment is going to be discarded ...", though it does not look very clever
10350e835331SIlpo Järvinen * in the case of the head skb. Due to potential receiver-driven attacks, we
10360e835331SIlpo Järvinen * choose to avoid immediate execution of a walk of the write queue due to
10370e835331SIlpo Järvinen * reneging and defer the head skb's loss recovery to the standard loss recovery
10380e835331SIlpo Järvinen * procedure that will eventually trigger (nothing forbids us doing this).
10395b3c9882SIlpo Järvinen *
10405b3c9882SIlpo Järvinen * This also implements blockage of start_seq wrap-around. The problem lies in the
10415b3c9882SIlpo Järvinen * fact that though start_seq (s) is before end_seq (i.e., not reversed),
10425b3c9882SIlpo Järvinen * there's no guarantee that it will be before snd_nxt (n). The problem
10435b3c9882SIlpo Järvinen * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
10445b3c9882SIlpo Järvinen * wrap (s_w):
10455b3c9882SIlpo Järvinen *
10465b3c9882SIlpo Järvinen * <- outs wnd -> <- wrapzone ->
10475b3c9882SIlpo Järvinen * u e n u_w e_w s n_w
10485b3c9882SIlpo Järvinen * | | | | | | |
10495b3c9882SIlpo Järvinen * |<------------+------+----- TCP seqno space --------------+---------->|
10505b3c9882SIlpo Järvinen * ...-- <2^31 ->| |<--------...
10515b3c9882SIlpo Järvinen * ...---- >2^31 ------>| |<--------...
10525b3c9882SIlpo Järvinen *
10535b3c9882SIlpo Järvinen * Current code wouldn't be vulnerable but it's better still to discard such
10545b3c9882SIlpo Järvinen * crazy SACK blocks. Doing this check for start_seq alone closes a somewhat
10555b3c9882SIlpo Järvinen * similar case (end_seq after snd_nxt wrap), as the earlier reversed check in the
10565b3c9882SIlpo Järvinen * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
10575b3c9882SIlpo Järvinen * equal to the ideal case (infinite seqno space without wrap-caused issues).
10585b3c9882SIlpo Järvinen *
10595b3c9882SIlpo Järvinen * With D-SACK the lower bound is extended to cover sequence space below
10605b3c9882SIlpo Järvinen * SND.UNA down to undo_marker, which is the last point of interest. Yet
1061564262c1SRyousei Takano * again, a D-SACK block must not go across snd_una (for the same reason as
10625b3c9882SIlpo Järvinen * for the normal SACK blocks, explained above). But there all simplicity
10635b3c9882SIlpo Järvinen * ends: TCP might receive valid D-SACKs below that. As long as they reside
10645b3c9882SIlpo Järvinen * fully below undo_marker they do not affect behavior in any way and can
10655b3c9882SIlpo Järvinen * therefore be safely ignored. In rare cases (which are more or less
10665b3c9882SIlpo Järvinen * theoretical ones), the D-SACK will nicely cross that boundary due to skb
10675b3c9882SIlpo Järvinen * fragmentation and packet reordering past the skb's retransmission. To consider
10685b3c9882SIlpo Järvinen * them correctly, the acceptable range must be extended even more, though
10695b3c9882SIlpo Järvinen * the exact amount is rather hard to quantify.
However, tp->max_window can 10705b3c9882SIlpo Järvinen * be used as an exaggerated estimate. 10711da177e4SLinus Torvalds */ 10725b3c9882SIlpo Järvinen static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, 10735b3c9882SIlpo Järvinen u32 start_seq, u32 end_seq) 10745b3c9882SIlpo Järvinen { 10755b3c9882SIlpo Järvinen /* Too far in future, or reversed (interpretation is ambiguous) */ 10765b3c9882SIlpo Järvinen if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) 10775b3c9882SIlpo Järvinen return 0; 10785b3c9882SIlpo Järvinen 10795b3c9882SIlpo Järvinen /* Nasty start_seq wrap-around check (see comments above) */ 10805b3c9882SIlpo Järvinen if (!before(start_seq, tp->snd_nxt)) 10815b3c9882SIlpo Järvinen return 0; 10825b3c9882SIlpo Järvinen 1083564262c1SRyousei Takano /* In outstanding window? ...This is valid exit for D-SACKs too. 10845b3c9882SIlpo Järvinen * start_seq == snd_una is non-sensical (see comments above) 10855b3c9882SIlpo Järvinen */ 10865b3c9882SIlpo Järvinen if (after(start_seq, tp->snd_una)) 10875b3c9882SIlpo Järvinen return 1; 10885b3c9882SIlpo Järvinen 10895b3c9882SIlpo Järvinen if (!is_dsack || !tp->undo_marker) 10905b3c9882SIlpo Järvinen return 0; 10915b3c9882SIlpo Järvinen 10925b3c9882SIlpo Järvinen /* ...Then it's D-SACK, and must reside below snd_una completely */ 10935b3c9882SIlpo Järvinen if (!after(end_seq, tp->snd_una)) 10945b3c9882SIlpo Järvinen return 0; 10955b3c9882SIlpo Järvinen 10965b3c9882SIlpo Järvinen if (!before(start_seq, tp->undo_marker)) 10975b3c9882SIlpo Järvinen return 1; 10985b3c9882SIlpo Järvinen 10995b3c9882SIlpo Järvinen /* Too old */ 11005b3c9882SIlpo Järvinen if (!after(end_seq, tp->undo_marker)) 11015b3c9882SIlpo Järvinen return 0; 11025b3c9882SIlpo Järvinen 11035b3c9882SIlpo Järvinen /* Undo_marker boundary crossing (overestimates a lot). Known already: 11045b3c9882SIlpo Järvinen * start_seq < undo_marker and end_seq >= undo_marker. 11055b3c9882SIlpo Järvinen */ 11065b3c9882SIlpo Järvinen return !before(start_seq, end_seq - tp->max_window); 11075b3c9882SIlpo Järvinen } 11085b3c9882SIlpo Järvinen 11091c1e87edSIlpo Järvinen /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". 11101c1e87edSIlpo Järvinen * Event "C". Later note: FACK people cheated me again 8), we have to account 11111c1e87edSIlpo Järvinen * for reordering! Ugly, but should help. 1112f785a8e2SIlpo Järvinen * 1113f785a8e2SIlpo Järvinen * Search retransmitted skbs from write_queue that were sent when snd_nxt was 1114f785a8e2SIlpo Järvinen * less than what is now known to be received by the other end (derived from 11159f58f3b7SIlpo Järvinen * highest SACK block). Also calculate the lowest snd_nxt among the remaining 11169f58f3b7SIlpo Järvinen * retransmitted skbs to avoid some costly processing per ACKs. 
11171c1e87edSIlpo Järvinen */ 1118407ef1deSIlpo Järvinen static void tcp_mark_lost_retrans(struct sock *sk) 11191c1e87edSIlpo Järvinen { 11209f58f3b7SIlpo Järvinen const struct inet_connection_sock *icsk = inet_csk(sk); 11211c1e87edSIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 11221c1e87edSIlpo Järvinen struct sk_buff *skb; 1123f785a8e2SIlpo Järvinen int cnt = 0; 1124df2e014bSIlpo Järvinen u32 new_low_seq = tp->snd_nxt; 11256859d494SIlpo Järvinen u32 received_upto = tcp_highest_sack_seq(tp); 11269f58f3b7SIlpo Järvinen 11279f58f3b7SIlpo Järvinen if (!tcp_is_fack(tp) || !tp->retrans_out || 11289f58f3b7SIlpo Järvinen !after(received_upto, tp->lost_retrans_low) || 11299f58f3b7SIlpo Järvinen icsk->icsk_ca_state != TCP_CA_Recovery) 1130407ef1deSIlpo Järvinen return; 11311c1e87edSIlpo Järvinen 11321c1e87edSIlpo Järvinen tcp_for_write_queue(skb, sk) { 11331c1e87edSIlpo Järvinen u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; 11341c1e87edSIlpo Järvinen 11351c1e87edSIlpo Järvinen if (skb == tcp_send_head(sk)) 11361c1e87edSIlpo Järvinen break; 1137f785a8e2SIlpo Järvinen if (cnt == tp->retrans_out) 11381c1e87edSIlpo Järvinen break; 11391c1e87edSIlpo Järvinen if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 11401c1e87edSIlpo Järvinen continue; 11411c1e87edSIlpo Järvinen 1142f785a8e2SIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) 1143f785a8e2SIlpo Järvinen continue; 1144f785a8e2SIlpo Järvinen 1145f785a8e2SIlpo Järvinen if (after(received_upto, ack_seq) && 11461c1e87edSIlpo Järvinen (tcp_is_fack(tp) || 1147f785a8e2SIlpo Järvinen !before(received_upto, 11481c1e87edSIlpo Järvinen ack_seq + tp->reordering * tp->mss_cache))) { 11491c1e87edSIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 11501c1e87edSIlpo Järvinen tp->retrans_out -= tcp_skb_pcount(skb); 11511c1e87edSIlpo Järvinen 11521c1e87edSIlpo Järvinen /* clear lost hint */ 11531c1e87edSIlpo Järvinen tp->retransmit_skb_hint = NULL; 11541c1e87edSIlpo Järvinen 11551c1e87edSIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 11561c1e87edSIlpo Järvinen tp->lost_out += tcp_skb_pcount(skb); 11571c1e87edSIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 11581c1e87edSIlpo Järvinen } 1159bce392f3SIlpo Järvinen NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); 1160f785a8e2SIlpo Järvinen } else { 1161df2e014bSIlpo Järvinen if (before(ack_seq, new_low_seq)) 1162b08d6cb2SIlpo Järvinen new_low_seq = ack_seq; 1163f785a8e2SIlpo Järvinen cnt += tcp_skb_pcount(skb); 11641c1e87edSIlpo Järvinen } 11651c1e87edSIlpo Järvinen } 1166b08d6cb2SIlpo Järvinen 1167b08d6cb2SIlpo Järvinen if (tp->retrans_out) 1168b08d6cb2SIlpo Järvinen tp->lost_retrans_low = new_low_seq; 11691c1e87edSIlpo Järvinen } 11705b3c9882SIlpo Järvinen 1171d06e021dSDavid S. Miller static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, 1172d06e021dSDavid S. Miller struct tcp_sack_block_wire *sp, int num_sacks, 1173d06e021dSDavid S. Miller u32 prior_snd_una) 1174d06e021dSDavid S. Miller { 1175d06e021dSDavid S. Miller u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq)); 1176d06e021dSDavid S. Miller u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq)); 1177d06e021dSDavid S. Miller int dup_sack = 0; 1178d06e021dSDavid S. Miller 1179d06e021dSDavid S. Miller if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { 1180d06e021dSDavid S. Miller dup_sack = 1; 1181e60402d0SIlpo Järvinen tcp_dsack_seen(tp); 1182d06e021dSDavid S. Miller NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); 1183d06e021dSDavid S. 
Miller } else if (num_sacks > 1) { 1184d06e021dSDavid S. Miller u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq)); 1185d06e021dSDavid S. Miller u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq)); 1186d06e021dSDavid S. Miller 1187d06e021dSDavid S. Miller if (!after(end_seq_0, end_seq_1) && 1188d06e021dSDavid S. Miller !before(start_seq_0, start_seq_1)) { 1189d06e021dSDavid S. Miller dup_sack = 1; 1190e60402d0SIlpo Järvinen tcp_dsack_seen(tp); 1191d06e021dSDavid S. Miller NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); 1192d06e021dSDavid S. Miller } 1193d06e021dSDavid S. Miller } 1194d06e021dSDavid S. Miller 1195d06e021dSDavid S. Miller /* D-SACK for already forgotten data... Do dumb counting. */ 1196d06e021dSDavid S. Miller if (dup_sack && 1197d06e021dSDavid S. Miller !after(end_seq_0, prior_snd_una) && 1198d06e021dSDavid S. Miller after(end_seq_0, tp->undo_marker)) 1199d06e021dSDavid S. Miller tp->undo_retrans--; 1200d06e021dSDavid S. Miller 1201d06e021dSDavid S. Miller return dup_sack; 1202d06e021dSDavid S. Miller } 1203d06e021dSDavid S. Miller 1204d1935942SIlpo Järvinen /* Check if skb is fully within the SACK block. In presence of GSO skbs, 1205d1935942SIlpo Järvinen * the incoming SACK may not exactly match but we can find smaller MSS 1206d1935942SIlpo Järvinen * aligned portion of it that matches. Therefore we might need to fragment 1207d1935942SIlpo Järvinen * which may fail and creates some hassle (caller must handle error case 1208d1935942SIlpo Järvinen * returns). 1209d1935942SIlpo Järvinen */ 12100f79efdcSAdrian Bunk static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1211d1935942SIlpo Järvinen u32 start_seq, u32 end_seq) 1212d1935942SIlpo Järvinen { 1213d1935942SIlpo Järvinen int in_sack, err; 1214d1935942SIlpo Järvinen unsigned int pkt_len; 1215d1935942SIlpo Järvinen 1216d1935942SIlpo Järvinen in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1217d1935942SIlpo Järvinen !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1218d1935942SIlpo Järvinen 1219d1935942SIlpo Järvinen if (tcp_skb_pcount(skb) > 1 && !in_sack && 1220d1935942SIlpo Järvinen after(TCP_SKB_CB(skb)->end_seq, start_seq)) { 1221d1935942SIlpo Järvinen 1222d1935942SIlpo Järvinen in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1223d1935942SIlpo Järvinen 1224d1935942SIlpo Järvinen if (!in_sack) 1225d1935942SIlpo Järvinen pkt_len = start_seq - TCP_SKB_CB(skb)->seq; 1226d1935942SIlpo Järvinen else 1227d1935942SIlpo Järvinen pkt_len = end_seq - TCP_SKB_CB(skb)->seq; 1228d1935942SIlpo Järvinen err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size); 1229d1935942SIlpo Järvinen if (err < 0) 1230d1935942SIlpo Järvinen return err; 1231d1935942SIlpo Järvinen } 1232d1935942SIlpo Järvinen 1233d1935942SIlpo Järvinen return in_sack; 1234d1935942SIlpo Järvinen } 1235d1935942SIlpo Järvinen 12366859d494SIlpo Järvinen static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, 12379e10c47cSIlpo Järvinen int *reord, int dup_sack, int fack_count) 12389e10c47cSIlpo Järvinen { 12396859d494SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 12409e10c47cSIlpo Järvinen u8 sacked = TCP_SKB_CB(skb)->sacked; 12419e10c47cSIlpo Järvinen int flag = 0; 12429e10c47cSIlpo Järvinen 12439e10c47cSIlpo Järvinen /* Account D-SACK for retransmitted packet. 
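 * (A D-SACK that covers a segment we retransmitted proves the retransmission
 * was unnecessary, so one undo_retrans credit is given back as long as the
 * segment lies above undo_marker; if the segment was also SACKed earlier,
 * that is evidence of reordering, per heuristic 2 in the reordering-detection
 * comment earlier in this file.)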
*/ 12449e10c47cSIlpo Järvinen if (dup_sack && (sacked & TCPCB_RETRANS)) { 12459e10c47cSIlpo Järvinen if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 12469e10c47cSIlpo Järvinen tp->undo_retrans--; 1247ede9f3b1SIlpo Järvinen if (sacked & TCPCB_SACKED_ACKED) 12489e10c47cSIlpo Järvinen *reord = min(fack_count, *reord); 12499e10c47cSIlpo Järvinen } 12509e10c47cSIlpo Järvinen 12519e10c47cSIlpo Järvinen /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 12529e10c47cSIlpo Järvinen if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 12539e10c47cSIlpo Järvinen return flag; 12549e10c47cSIlpo Järvinen 12559e10c47cSIlpo Järvinen if (!(sacked & TCPCB_SACKED_ACKED)) { 12569e10c47cSIlpo Järvinen if (sacked & TCPCB_SACKED_RETRANS) { 12579e10c47cSIlpo Järvinen /* If the segment is not tagged as lost, 12589e10c47cSIlpo Järvinen * we do not clear RETRANS, believing 12599e10c47cSIlpo Järvinen * that retransmission is still in flight. 12609e10c47cSIlpo Järvinen */ 12619e10c47cSIlpo Järvinen if (sacked & TCPCB_LOST) { 12629e10c47cSIlpo Järvinen TCP_SKB_CB(skb)->sacked &= 12639e10c47cSIlpo Järvinen ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 12649e10c47cSIlpo Järvinen tp->lost_out -= tcp_skb_pcount(skb); 12659e10c47cSIlpo Järvinen tp->retrans_out -= tcp_skb_pcount(skb); 12669e10c47cSIlpo Järvinen 12679e10c47cSIlpo Järvinen /* clear lost hint */ 12689e10c47cSIlpo Järvinen tp->retransmit_skb_hint = NULL; 12699e10c47cSIlpo Järvinen } 12709e10c47cSIlpo Järvinen } else { 12719e10c47cSIlpo Järvinen if (!(sacked & TCPCB_RETRANS)) { 12729e10c47cSIlpo Järvinen /* New sack for not retransmitted frame, 12739e10c47cSIlpo Järvinen * which was in hole. It is reordering. 12749e10c47cSIlpo Järvinen */ 12759e10c47cSIlpo Järvinen if (before(TCP_SKB_CB(skb)->seq, 12769e10c47cSIlpo Järvinen tcp_highest_sack_seq(tp))) 12779e10c47cSIlpo Järvinen *reord = min(fack_count, *reord); 12789e10c47cSIlpo Järvinen 12799e10c47cSIlpo Järvinen /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 12809e10c47cSIlpo Järvinen if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) 12819e10c47cSIlpo Järvinen flag |= FLAG_ONLY_ORIG_SACKED; 12829e10c47cSIlpo Järvinen } 12839e10c47cSIlpo Järvinen 12849e10c47cSIlpo Järvinen if (sacked & TCPCB_LOST) { 12859e10c47cSIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 12869e10c47cSIlpo Järvinen tp->lost_out -= tcp_skb_pcount(skb); 12879e10c47cSIlpo Järvinen 12889e10c47cSIlpo Järvinen /* clear lost hint */ 12899e10c47cSIlpo Järvinen tp->retransmit_skb_hint = NULL; 12909e10c47cSIlpo Järvinen } 12919e10c47cSIlpo Järvinen } 12929e10c47cSIlpo Järvinen 12939e10c47cSIlpo Järvinen TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; 12949e10c47cSIlpo Järvinen flag |= FLAG_DATA_SACKED; 12959e10c47cSIlpo Järvinen tp->sacked_out += tcp_skb_pcount(skb); 12969e10c47cSIlpo Järvinen 12979e10c47cSIlpo Järvinen fack_count += tcp_skb_pcount(skb); 12989e10c47cSIlpo Järvinen 12999e10c47cSIlpo Järvinen /* Lost marker hint past SACKed? 
Tweak RFC3517 cnt */ 13009e10c47cSIlpo Järvinen if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 13019e10c47cSIlpo Järvinen before(TCP_SKB_CB(skb)->seq, 13029e10c47cSIlpo Järvinen TCP_SKB_CB(tp->lost_skb_hint)->seq)) 13039e10c47cSIlpo Järvinen tp->lost_cnt_hint += tcp_skb_pcount(skb); 13049e10c47cSIlpo Järvinen 13059e10c47cSIlpo Järvinen if (fack_count > tp->fackets_out) 13069e10c47cSIlpo Järvinen tp->fackets_out = fack_count; 13079e10c47cSIlpo Järvinen 13086859d494SIlpo Järvinen if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 13096859d494SIlpo Järvinen tcp_advance_highest_sack(sk, skb); 13109e10c47cSIlpo Järvinen } 13119e10c47cSIlpo Järvinen 13129e10c47cSIlpo Järvinen /* D-SACK. We can detect redundant retransmission in S|R and plain R 13139e10c47cSIlpo Järvinen * frames and clear it. undo_retrans is decreased above, L|R frames 13149e10c47cSIlpo Järvinen * are accounted above as well. 13159e10c47cSIlpo Järvinen */ 13169e10c47cSIlpo Järvinen if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { 13179e10c47cSIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 13189e10c47cSIlpo Järvinen tp->retrans_out -= tcp_skb_pcount(skb); 13199e10c47cSIlpo Järvinen tp->retransmit_skb_hint = NULL; 13209e10c47cSIlpo Järvinen } 13219e10c47cSIlpo Järvinen 13229e10c47cSIlpo Järvinen return flag; 13239e10c47cSIlpo Järvinen } 13249e10c47cSIlpo Järvinen 132568f8353bSIlpo Järvinen static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, 132668f8353bSIlpo Järvinen struct tcp_sack_block *next_dup, 132768f8353bSIlpo Järvinen u32 start_seq, u32 end_seq, 132868f8353bSIlpo Järvinen int dup_sack_in, int *fack_count, 132968f8353bSIlpo Järvinen int *reord, int *flag) 133068f8353bSIlpo Järvinen { 133168f8353bSIlpo Järvinen tcp_for_write_queue_from(skb, sk) { 133268f8353bSIlpo Järvinen int in_sack = 0; 133368f8353bSIlpo Järvinen int dup_sack = dup_sack_in; 133468f8353bSIlpo Järvinen 133568f8353bSIlpo Järvinen if (skb == tcp_send_head(sk)) 133668f8353bSIlpo Järvinen break; 133768f8353bSIlpo Järvinen 133868f8353bSIlpo Järvinen /* queue is in-order => we can short-circuit the walk early */ 133968f8353bSIlpo Järvinen if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 134068f8353bSIlpo Järvinen break; 134168f8353bSIlpo Järvinen 134268f8353bSIlpo Järvinen if ((next_dup != NULL) && 134368f8353bSIlpo Järvinen before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { 134468f8353bSIlpo Järvinen in_sack = tcp_match_skb_to_sack(sk, skb, 134568f8353bSIlpo Järvinen next_dup->start_seq, 134668f8353bSIlpo Järvinen next_dup->end_seq); 134768f8353bSIlpo Järvinen if (in_sack > 0) 134868f8353bSIlpo Järvinen dup_sack = 1; 134968f8353bSIlpo Järvinen } 135068f8353bSIlpo Järvinen 135168f8353bSIlpo Järvinen if (in_sack <= 0) 1352056834d9SIlpo Järvinen in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, 1353056834d9SIlpo Järvinen end_seq); 135468f8353bSIlpo Järvinen if (unlikely(in_sack < 0)) 135568f8353bSIlpo Järvinen break; 135668f8353bSIlpo Järvinen 135768f8353bSIlpo Järvinen if (in_sack) 1358056834d9SIlpo Järvinen *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, 1359056834d9SIlpo Järvinen *fack_count); 136068f8353bSIlpo Järvinen 136168f8353bSIlpo Järvinen *fack_count += tcp_skb_pcount(skb); 136268f8353bSIlpo Järvinen } 136368f8353bSIlpo Järvinen return skb; 136468f8353bSIlpo Järvinen } 136568f8353bSIlpo Järvinen 136668f8353bSIlpo Järvinen /* Avoid all extra work that is being done by sacktag while walking in 136768f8353bSIlpo Järvinen * a normal way 136868f8353bSIlpo Järvinen */ 
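/* Illustrative note (not a separate API contract): a sacktag pass traverses
 * the write queue once, in order. tcp_sacktag_skip() below fast-forwards past
 * skbs that end before the sequence of interest, and tcp_sacktag_walk() above
 * tags the skbs inside the SACKed range, roughly as the main loop in
 * tcp_sacktag_write_queue() uses them:
 *
 *	skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count);
 *	skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
 *			       dup_sack, &fack_count, &reord, &flag);
 */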
136968f8353bSIlpo Järvinen static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1370*d152a7d8SIlpo Järvinen u32 skip_to_seq, int *fack_count) 137168f8353bSIlpo Järvinen { 137268f8353bSIlpo Järvinen tcp_for_write_queue_from(skb, sk) { 137368f8353bSIlpo Järvinen if (skb == tcp_send_head(sk)) 137468f8353bSIlpo Järvinen break; 137568f8353bSIlpo Järvinen 1376ea4f76aeSIlpo Järvinen if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 137768f8353bSIlpo Järvinen break; 1378*d152a7d8SIlpo Järvinen 1379*d152a7d8SIlpo Järvinen *fack_count += tcp_skb_pcount(skb); 138068f8353bSIlpo Järvinen } 138168f8353bSIlpo Järvinen return skb; 138268f8353bSIlpo Järvinen } 138368f8353bSIlpo Järvinen 138468f8353bSIlpo Järvinen static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 138568f8353bSIlpo Järvinen struct sock *sk, 138668f8353bSIlpo Järvinen struct tcp_sack_block *next_dup, 138768f8353bSIlpo Järvinen u32 skip_to_seq, 138868f8353bSIlpo Järvinen int *fack_count, int *reord, 138968f8353bSIlpo Järvinen int *flag) 139068f8353bSIlpo Järvinen { 139168f8353bSIlpo Järvinen if (next_dup == NULL) 139268f8353bSIlpo Järvinen return skb; 139368f8353bSIlpo Järvinen 139468f8353bSIlpo Järvinen if (before(next_dup->start_seq, skip_to_seq)) { 1395*d152a7d8SIlpo Järvinen skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); 139668f8353bSIlpo Järvinen tcp_sacktag_walk(skb, sk, NULL, 139768f8353bSIlpo Järvinen next_dup->start_seq, next_dup->end_seq, 139868f8353bSIlpo Järvinen 1, fack_count, reord, flag); 139968f8353bSIlpo Järvinen } 140068f8353bSIlpo Järvinen 140168f8353bSIlpo Järvinen return skb; 140268f8353bSIlpo Järvinen } 140368f8353bSIlpo Järvinen 140468f8353bSIlpo Järvinen static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache) 140568f8353bSIlpo Järvinen { 140668f8353bSIlpo Järvinen return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 140768f8353bSIlpo Järvinen } 140868f8353bSIlpo Järvinen 14091da177e4SLinus Torvalds static int 1410056834d9SIlpo Järvinen tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, 1411056834d9SIlpo Järvinen u32 prior_snd_una) 14121da177e4SLinus Torvalds { 14136687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 14141da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 14159c70220bSArnaldo Carvalho de Melo unsigned char *ptr = (skb_transport_header(ack_skb) + 14169c70220bSArnaldo Carvalho de Melo TCP_SKB_CB(ack_skb)->sacked); 1417fd6dad61SIlpo Järvinen struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1418fd6dad61SIlpo Järvinen struct tcp_sack_block sp[4]; 141968f8353bSIlpo Järvinen struct tcp_sack_block *cache; 142068f8353bSIlpo Järvinen struct sk_buff *skb; 14211da177e4SLinus Torvalds int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE) >> 3; 1422fd6dad61SIlpo Järvinen int used_sacks; 14231da177e4SLinus Torvalds int reord = tp->packets_out; 14241da177e4SLinus Torvalds int flag = 0; 14257769f406SIlpo Järvinen int found_dup_sack = 0; 142668f8353bSIlpo Järvinen int fack_count; 142768f8353bSIlpo Järvinen int i, j; 1428fda03fbbSBaruch Even int first_sack_index; 14291da177e4SLinus Torvalds 1430d738cd8fSIlpo Järvinen if (!tp->sacked_out) { 1431de83c058SIlpo Järvinen if (WARN_ON(tp->fackets_out)) 14321da177e4SLinus Torvalds tp->fackets_out = 0; 14336859d494SIlpo Järvinen tcp_highest_sack_reset(sk); 1434d738cd8fSIlpo Järvinen } 14351da177e4SLinus Torvalds 1436fd6dad61SIlpo Järvinen found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, 
1437d06e021dSDavid S. Miller num_sacks, prior_snd_una); 1438d06e021dSDavid S. Miller if (found_dup_sack) 143949ff4bb4SIlpo Järvinen flag |= FLAG_DSACKING_ACK; 14406f74651aSBaruch Even 14416f74651aSBaruch Even /* Eliminate too old ACKs, but take into 14426f74651aSBaruch Even * account more or less fresh ones, they can 14436f74651aSBaruch Even * contain valid SACK info. 14446f74651aSBaruch Even */ 14456f74651aSBaruch Even if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 14466f74651aSBaruch Even return 0; 14476f74651aSBaruch Even 144896a2d41aSIlpo Järvinen if (!tp->packets_out) 144996a2d41aSIlpo Järvinen goto out; 145096a2d41aSIlpo Järvinen 1451fd6dad61SIlpo Järvinen used_sacks = 0; 1452fd6dad61SIlpo Järvinen first_sack_index = 0; 1453fd6dad61SIlpo Järvinen for (i = 0; i < num_sacks; i++) { 1454fd6dad61SIlpo Järvinen int dup_sack = !i && found_dup_sack; 1455fd6dad61SIlpo Järvinen 1456fd6dad61SIlpo Järvinen sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq)); 1457fd6dad61SIlpo Järvinen sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq)); 1458fd6dad61SIlpo Järvinen 1459fd6dad61SIlpo Järvinen if (!tcp_is_sackblock_valid(tp, dup_sack, 1460fd6dad61SIlpo Järvinen sp[used_sacks].start_seq, 1461fd6dad61SIlpo Järvinen sp[used_sacks].end_seq)) { 1462fd6dad61SIlpo Järvinen if (dup_sack) { 1463fd6dad61SIlpo Järvinen if (!tp->undo_marker) 1464fd6dad61SIlpo Järvinen NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO); 1465fd6dad61SIlpo Järvinen else 1466fd6dad61SIlpo Järvinen NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD); 1467fd6dad61SIlpo Järvinen } else { 1468fd6dad61SIlpo Järvinen /* Don't count olds caused by ACK reordering */ 1469fd6dad61SIlpo Järvinen if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1470fd6dad61SIlpo Järvinen !after(sp[used_sacks].end_seq, tp->snd_una)) 1471fd6dad61SIlpo Järvinen continue; 1472fd6dad61SIlpo Järvinen NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD); 1473fd6dad61SIlpo Järvinen } 1474fd6dad61SIlpo Järvinen if (i == 0) 1475fd6dad61SIlpo Järvinen first_sack_index = -1; 1476fd6dad61SIlpo Järvinen continue; 1477fd6dad61SIlpo Järvinen } 1478fd6dad61SIlpo Järvinen 1479fd6dad61SIlpo Järvinen /* Ignore very old stuff early */ 1480fd6dad61SIlpo Järvinen if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1481fd6dad61SIlpo Järvinen continue; 1482fd6dad61SIlpo Järvinen 1483fd6dad61SIlpo Järvinen used_sacks++; 1484fd6dad61SIlpo Järvinen } 1485fd6dad61SIlpo Järvinen 14866a438bbeSStephen Hemminger /* order SACK blocks to allow in order walk of the retrans queue */ 1487fd6dad61SIlpo Järvinen for (i = used_sacks - 1; i > 0; i--) { 14886a438bbeSStephen Hemminger for (j = 0; j < i; j++) { 1489fd6dad61SIlpo Järvinen if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1490fd6dad61SIlpo Järvinen struct tcp_sack_block tmp; 1491db3ccdacSBaruch Even 1492db3ccdacSBaruch Even tmp = sp[j]; 1493db3ccdacSBaruch Even sp[j] = sp[j + 1]; 1494db3ccdacSBaruch Even sp[j + 1] = tmp; 1495fda03fbbSBaruch Even 1496fda03fbbSBaruch Even /* Track where the first SACK block goes to */ 1497fda03fbbSBaruch Even if (j == first_sack_index) 1498fda03fbbSBaruch Even first_sack_index = j + 1; 14996a438bbeSStephen Hemminger } 15006a438bbeSStephen Hemminger } 15016a438bbeSStephen Hemminger } 15026a438bbeSStephen Hemminger 150368f8353bSIlpo Järvinen skb = tcp_write_queue_head(sk); 150468f8353bSIlpo Järvinen fack_count = 0; 150568f8353bSIlpo Järvinen i = 0; 150668f8353bSIlpo Järvinen 150768f8353bSIlpo Järvinen if (!tp->sacked_out) { 150868f8353bSIlpo Järvinen /* It's 
already past, so skip checking against it */ 150968f8353bSIlpo Järvinen cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 151068f8353bSIlpo Järvinen } else { 151168f8353bSIlpo Järvinen cache = tp->recv_sack_cache; 151268f8353bSIlpo Järvinen /* Skip empty blocks in at head of the cache */ 151368f8353bSIlpo Järvinen while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 151468f8353bSIlpo Järvinen !cache->end_seq) 151568f8353bSIlpo Järvinen cache++; 1516fda03fbbSBaruch Even } 1517fda03fbbSBaruch Even 151868f8353bSIlpo Järvinen while (i < used_sacks) { 1519fd6dad61SIlpo Järvinen u32 start_seq = sp[i].start_seq; 1520fd6dad61SIlpo Järvinen u32 end_seq = sp[i].end_seq; 15217769f406SIlpo Järvinen int dup_sack = (found_dup_sack && (i == first_sack_index)); 152268f8353bSIlpo Järvinen struct tcp_sack_block *next_dup = NULL; 1523e56d6cd6SIlpo Järvinen 152468f8353bSIlpo Järvinen if (found_dup_sack && ((i + 1) == first_sack_index)) 152568f8353bSIlpo Järvinen next_dup = &sp[i + 1]; 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds /* Event "B" in the comment above. */ 15281da177e4SLinus Torvalds if (after(end_seq, tp->high_seq)) 15291da177e4SLinus Torvalds flag |= FLAG_DATA_LOST; 15301da177e4SLinus Torvalds 153168f8353bSIlpo Järvinen /* Skip too early cached blocks */ 153268f8353bSIlpo Järvinen while (tcp_sack_cache_ok(tp, cache) && 153368f8353bSIlpo Järvinen !before(start_seq, cache->end_seq)) 153468f8353bSIlpo Järvinen cache++; 15351da177e4SLinus Torvalds 153668f8353bSIlpo Järvinen /* Can skip some work by looking recv_sack_cache? */ 153768f8353bSIlpo Järvinen if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 153868f8353bSIlpo Järvinen after(end_seq, cache->start_seq)) { 1539fe067e8aSDavid S. Miller 154068f8353bSIlpo Järvinen /* Head todo? */ 154168f8353bSIlpo Järvinen if (before(start_seq, cache->start_seq)) { 1542*d152a7d8SIlpo Järvinen skb = tcp_sacktag_skip(skb, sk, start_seq, 1543*d152a7d8SIlpo Järvinen &fack_count); 1544056834d9SIlpo Järvinen skb = tcp_sacktag_walk(skb, sk, next_dup, 1545056834d9SIlpo Järvinen start_seq, 1546056834d9SIlpo Järvinen cache->start_seq, 1547056834d9SIlpo Järvinen dup_sack, &fack_count, 1548056834d9SIlpo Järvinen &reord, &flag); 1549fda03fbbSBaruch Even } 15506a438bbeSStephen Hemminger 155168f8353bSIlpo Järvinen /* Rest of the block already fully processed? */ 155220de20beSIlpo Järvinen if (!after(end_seq, cache->end_seq)) 155320de20beSIlpo Järvinen goto advance_sp; 155420de20beSIlpo Järvinen 1555056834d9SIlpo Järvinen skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1556056834d9SIlpo Järvinen cache->end_seq, 1557056834d9SIlpo Järvinen &fack_count, &reord, 1558056834d9SIlpo Järvinen &flag); 155968f8353bSIlpo Järvinen 156068f8353bSIlpo Järvinen /* ...tail remains todo... */ 15616859d494SIlpo Järvinen if (tcp_highest_sack_seq(tp) == cache->end_seq) { 156220de20beSIlpo Järvinen /* ...but better entrypoint exists! 
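 * Everything below the highest SACKed sequence was already tagged on an
 * earlier pass (that is what recv_sack_cache records), so the walk can
 * resume directly from tcp_highest_sack() instead of skipping skb by skb.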
*/ 15636859d494SIlpo Järvinen skb = tcp_highest_sack(sk); 15646859d494SIlpo Järvinen if (skb == NULL) 15656859d494SIlpo Järvinen break; 156668f8353bSIlpo Järvinen fack_count = tp->fackets_out; 156768f8353bSIlpo Järvinen cache++; 156868f8353bSIlpo Järvinen goto walk; 1569e56d6cd6SIlpo Järvinen } 1570e56d6cd6SIlpo Järvinen 1571*d152a7d8SIlpo Järvinen skb = tcp_sacktag_skip(skb, sk, cache->end_seq, 1572*d152a7d8SIlpo Järvinen &fack_count); 157368f8353bSIlpo Järvinen /* Check overlap against next cached too (past this one already) */ 157468f8353bSIlpo Järvinen cache++; 157568f8353bSIlpo Järvinen continue; 15761da177e4SLinus Torvalds } 1577fbd52eb2SIlpo Järvinen 15786859d494SIlpo Järvinen if (!before(start_seq, tcp_highest_sack_seq(tp))) { 15796859d494SIlpo Järvinen skb = tcp_highest_sack(sk); 15806859d494SIlpo Järvinen if (skb == NULL) 15816859d494SIlpo Järvinen break; 158268f8353bSIlpo Järvinen fack_count = tp->fackets_out; 158368f8353bSIlpo Järvinen } 1584*d152a7d8SIlpo Järvinen skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count); 158568f8353bSIlpo Järvinen 158668f8353bSIlpo Järvinen walk: 158768f8353bSIlpo Järvinen skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq, 158868f8353bSIlpo Järvinen dup_sack, &fack_count, &reord, &flag); 158968f8353bSIlpo Järvinen 159068f8353bSIlpo Järvinen advance_sp: 1591fbd52eb2SIlpo Järvinen /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct 1592fbd52eb2SIlpo Järvinen * due to in-order walk 1593fbd52eb2SIlpo Järvinen */ 1594fbd52eb2SIlpo Järvinen if (after(end_seq, tp->frto_highmark)) 1595fbd52eb2SIlpo Järvinen flag &= ~FLAG_ONLY_ORIG_SACKED; 159668f8353bSIlpo Järvinen 159768f8353bSIlpo Järvinen i++; 15981da177e4SLinus Torvalds } 15991da177e4SLinus Torvalds 160068f8353bSIlpo Järvinen /* Clear the head of the cache sack blocks so we can skip it next time */ 160168f8353bSIlpo Järvinen for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { 160268f8353bSIlpo Järvinen tp->recv_sack_cache[i].start_seq = 0; 160368f8353bSIlpo Järvinen tp->recv_sack_cache[i].end_seq = 0; 160468f8353bSIlpo Järvinen } 160568f8353bSIlpo Järvinen for (j = 0; j < used_sacks; j++) 160668f8353bSIlpo Järvinen tp->recv_sack_cache[i++] = sp[j]; 160768f8353bSIlpo Järvinen 1608407ef1deSIlpo Järvinen tcp_mark_lost_retrans(sk); 16091da177e4SLinus Torvalds 161086426c22SIlpo Järvinen tcp_verify_left_out(tp); 161186426c22SIlpo Järvinen 1612f5771113SIlpo Järvinen if ((reord < tp->fackets_out) && 1613f5771113SIlpo Järvinen ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && 1614c5e7af0dSIlpo Järvinen (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 16158dd71c5dSIlpo Järvinen tcp_update_reordering(sk, tp->fackets_out - reord, 0); 16161da177e4SLinus Torvalds 161796a2d41aSIlpo Järvinen out: 161896a2d41aSIlpo Järvinen 16191da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 16201da177e4SLinus Torvalds BUG_TRAP((int)tp->sacked_out >= 0); 16211da177e4SLinus Torvalds BUG_TRAP((int)tp->lost_out >= 0); 16221da177e4SLinus Torvalds BUG_TRAP((int)tp->retrans_out >= 0); 16231da177e4SLinus Torvalds BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0); 16241da177e4SLinus Torvalds #endif 16251da177e4SLinus Torvalds return flag; 16261da177e4SLinus Torvalds } 16271da177e4SLinus Torvalds 162895eacd27SIlpo Järvinen /* If we receive more dupacks than we expected counting segments 162995eacd27SIlpo Järvinen * in assumption of absent reordering, interpret this as reordering. 163095eacd27SIlpo Järvinen * The only another reason could be bug in receiver TCP. 
163130935cf4SIlpo Järvinen */ 16324ddf6676SIlpo Järvinen static void tcp_check_reno_reordering(struct sock *sk, const int addend) 16334ddf6676SIlpo Järvinen { 16344ddf6676SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 16354ddf6676SIlpo Järvinen u32 holes; 16364ddf6676SIlpo Järvinen 16374ddf6676SIlpo Järvinen holes = max(tp->lost_out, 1U); 16384ddf6676SIlpo Järvinen holes = min(holes, tp->packets_out); 16394ddf6676SIlpo Järvinen 16404ddf6676SIlpo Järvinen if ((tp->sacked_out + holes) > tp->packets_out) { 16414ddf6676SIlpo Järvinen tp->sacked_out = tp->packets_out - holes; 16424ddf6676SIlpo Järvinen tcp_update_reordering(sk, tp->packets_out + addend, 0); 16434ddf6676SIlpo Järvinen } 16444ddf6676SIlpo Järvinen } 16454ddf6676SIlpo Järvinen 16464ddf6676SIlpo Järvinen /* Emulate SACKs for SACKless connection: account for a new dupack. */ 16474ddf6676SIlpo Järvinen 16484ddf6676SIlpo Järvinen static void tcp_add_reno_sack(struct sock *sk) 16494ddf6676SIlpo Järvinen { 16504ddf6676SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 16514ddf6676SIlpo Järvinen tp->sacked_out++; 16524ddf6676SIlpo Järvinen tcp_check_reno_reordering(sk, 0); 1653005903bcSIlpo Järvinen tcp_verify_left_out(tp); 16544ddf6676SIlpo Järvinen } 16554ddf6676SIlpo Järvinen 16564ddf6676SIlpo Järvinen /* Account for ACK, ACKing some data in Reno Recovery phase. */ 16574ddf6676SIlpo Järvinen 16584ddf6676SIlpo Järvinen static void tcp_remove_reno_sacks(struct sock *sk, int acked) 16594ddf6676SIlpo Järvinen { 16604ddf6676SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 16614ddf6676SIlpo Järvinen 16624ddf6676SIlpo Järvinen if (acked > 0) { 16634ddf6676SIlpo Järvinen /* One ACK acked hole. The rest eat duplicate ACKs. */ 16644ddf6676SIlpo Järvinen if (acked - 1 >= tp->sacked_out) 16654ddf6676SIlpo Järvinen tp->sacked_out = 0; 16664ddf6676SIlpo Järvinen else 16674ddf6676SIlpo Järvinen tp->sacked_out -= acked - 1; 16684ddf6676SIlpo Järvinen } 16694ddf6676SIlpo Järvinen tcp_check_reno_reordering(sk, acked); 1670005903bcSIlpo Järvinen tcp_verify_left_out(tp); 16714ddf6676SIlpo Järvinen } 16724ddf6676SIlpo Järvinen 16734ddf6676SIlpo Järvinen static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 16744ddf6676SIlpo Järvinen { 16754ddf6676SIlpo Järvinen tp->sacked_out = 0; 16764ddf6676SIlpo Järvinen } 16774ddf6676SIlpo Järvinen 167895eacd27SIlpo Järvinen /* F-RTO can only be used if TCP has never retransmitted anything other than 167995eacd27SIlpo Järvinen * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) 168095eacd27SIlpo Järvinen */ 168146d0de4eSIlpo Järvinen int tcp_use_frto(struct sock *sk) 1682bdaae17dSIlpo Järvinen { 1683bdaae17dSIlpo Järvinen const struct tcp_sock *tp = tcp_sk(sk); 168446d0de4eSIlpo Järvinen struct sk_buff *skb; 1685bdaae17dSIlpo Järvinen 1686575ee714SIlpo Järvinen if (!sysctl_tcp_frto) 168746d0de4eSIlpo Järvinen return 0; 168846d0de4eSIlpo Järvinen 16894dc2665eSIlpo Järvinen if (IsSackFrto()) 16904dc2665eSIlpo Järvinen return 1; 16914dc2665eSIlpo Järvinen 169246d0de4eSIlpo Järvinen /* Avoid expensive walking of rexmit queue if possible */ 169346d0de4eSIlpo Järvinen if (tp->retrans_out > 1) 169446d0de4eSIlpo Järvinen return 0; 169546d0de4eSIlpo Järvinen 1696fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 1697fe067e8aSDavid S. Miller skb = tcp_write_queue_next(sk, skb); /* Skips head */ 1698fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 1699fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 1700fe067e8aSDavid S. 
Miller break; 170146d0de4eSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 170246d0de4eSIlpo Järvinen return 0; 170346d0de4eSIlpo Järvinen /* Short-circuit when first non-SACKed skb has been checked */ 170446d0de4eSIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 170546d0de4eSIlpo Järvinen break; 170646d0de4eSIlpo Järvinen } 170746d0de4eSIlpo Järvinen return 1; 1708bdaae17dSIlpo Järvinen } 1709bdaae17dSIlpo Järvinen 171030935cf4SIlpo Järvinen /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO 171130935cf4SIlpo Järvinen * recovery a bit and use heuristics in tcp_process_frto() to detect if 1712d1a54c6aSIlpo Järvinen * the RTO was spurious. Only clear SACKED_RETRANS of the head here to 1713d1a54c6aSIlpo Järvinen * keep retrans_out counting accurate (with SACK F-RTO, other than head 1714d1a54c6aSIlpo Järvinen * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS 1715d1a54c6aSIlpo Järvinen * bits are handled if the Loss state is really to be entered (in 1716d1a54c6aSIlpo Järvinen * tcp_enter_frto_loss). 17177487c48cSIlpo Järvinen * 17187487c48cSIlpo Järvinen * Do like tcp_enter_loss() would; when RTO expires the second time it 17197487c48cSIlpo Järvinen * does: 17207487c48cSIlpo Järvinen * "Reduce ssthresh if it has not yet been made inside this window." 17211da177e4SLinus Torvalds */ 17221da177e4SLinus Torvalds void tcp_enter_frto(struct sock *sk) 17231da177e4SLinus Torvalds { 17246687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 17251da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17261da177e4SLinus Torvalds struct sk_buff *skb; 17271da177e4SLinus Torvalds 17287487c48cSIlpo Järvinen if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || 17291da177e4SLinus Torvalds tp->snd_una == tp->high_seq || 17307487c48cSIlpo Järvinen ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && 17317487c48cSIlpo Järvinen !icsk->icsk_retransmits)) { 17326687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 173366e93e45SIlpo Järvinen /* Our state is too optimistic in ssthresh() call because cwnd 1734564262c1SRyousei Takano * is not reduced until tcp_enter_frto_loss() when previous F-RTO 173566e93e45SIlpo Järvinen * recovery has not yet completed. Pattern would be this: RTO, 173666e93e45SIlpo Järvinen * Cumulative ACK, RTO (2xRTO for the same segment does not end 173766e93e45SIlpo Järvinen * up here twice). 173866e93e45SIlpo Järvinen * RFC4138 should be more specific on what to do, even though 173966e93e45SIlpo Järvinen * RTO is quite unlikely to occur after the first Cumulative ACK 174066e93e45SIlpo Järvinen * due to back-off and complexity of triggering events ... 174166e93e45SIlpo Järvinen */ 174266e93e45SIlpo Järvinen if (tp->frto_counter) { 174366e93e45SIlpo Järvinen u32 stored_cwnd; 174466e93e45SIlpo Järvinen stored_cwnd = tp->snd_cwnd; 174566e93e45SIlpo Järvinen tp->snd_cwnd = 2; 17466687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 174766e93e45SIlpo Järvinen tp->snd_cwnd = stored_cwnd; 174866e93e45SIlpo Järvinen } else { 174966e93e45SIlpo Järvinen tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 175066e93e45SIlpo Järvinen } 175166e93e45SIlpo Järvinen /* ... in theory, cong.control module could do "any tricks" in 175266e93e45SIlpo Järvinen * ssthresh(), which means that ca_state, lost bits and lost_out 175366e93e45SIlpo Järvinen * counter would have to be faked before the call occurs. 
We 175466e93e45SIlpo Järvinen * consider that too expensive, unlikely and hacky, so modules 175566e93e45SIlpo Järvinen * using these in ssthresh() must deal these incompatibility 175666e93e45SIlpo Järvinen * issues if they receives CA_EVENT_FRTO and frto_counter != 0 175766e93e45SIlpo Järvinen */ 17586687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_FRTO); 17591da177e4SLinus Torvalds } 17601da177e4SLinus Torvalds 17611da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 17621da177e4SLinus Torvalds tp->undo_retrans = 0; 17631da177e4SLinus Torvalds 1764fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 1765009a2e3eSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1766009a2e3eSIlpo Järvinen tp->undo_marker = 0; 1767d1a54c6aSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 1768522e7548SIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1769d1a54c6aSIlpo Järvinen tp->retrans_out -= tcp_skb_pcount(skb); 17701da177e4SLinus Torvalds } 1771005903bcSIlpo Järvinen tcp_verify_left_out(tp); 17721da177e4SLinus Torvalds 1773746aa32dSIlpo Järvinen /* Too bad if TCP was application limited */ 1774746aa32dSIlpo Järvinen tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 1775746aa32dSIlpo Järvinen 17764dc2665eSIlpo Järvinen /* Earlier loss recovery underway (see RFC4138; Appendix B). 17774dc2665eSIlpo Järvinen * The last condition is necessary at least in tp->frto_counter case. 17784dc2665eSIlpo Järvinen */ 17794dc2665eSIlpo Järvinen if (IsSackFrto() && (tp->frto_counter || 17804dc2665eSIlpo Järvinen ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && 17814dc2665eSIlpo Järvinen after(tp->high_seq, tp->snd_una)) { 17824dc2665eSIlpo Järvinen tp->frto_highmark = tp->high_seq; 17834dc2665eSIlpo Järvinen } else { 17844dc2665eSIlpo Järvinen tp->frto_highmark = tp->snd_nxt; 17854dc2665eSIlpo Järvinen } 17867b0eb22bSIlpo Järvinen tcp_set_ca_state(sk, TCP_CA_Disorder); 17877b0eb22bSIlpo Järvinen tp->high_seq = tp->snd_nxt; 17887487c48cSIlpo Järvinen tp->frto_counter = 1; 17891da177e4SLinus Torvalds } 17901da177e4SLinus Torvalds 17911da177e4SLinus Torvalds /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, 17921da177e4SLinus Torvalds * which indicates that we should follow the traditional RTO recovery, 17931da177e4SLinus Torvalds * i.e. mark everything lost and do go-back-N retransmission. 17941da177e4SLinus Torvalds */ 1795d1a54c6aSIlpo Järvinen static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) 17961da177e4SLinus Torvalds { 17971da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 17981da177e4SLinus Torvalds struct sk_buff *skb; 17991da177e4SLinus Torvalds 18001da177e4SLinus Torvalds tp->lost_out = 0; 1801d1a54c6aSIlpo Järvinen tp->retrans_out = 0; 1802e60402d0SIlpo Järvinen if (tcp_is_reno(tp)) 18039bff40fdSIlpo Järvinen tcp_reset_reno_sack(tp); 18041da177e4SLinus Torvalds 1805fe067e8aSDavid S. Miller tcp_for_write_queue(skb, sk) { 1806fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 1807fe067e8aSDavid S. Miller break; 180823aeeec3SIlpo Järvinen 180923aeeec3SIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1810d1a54c6aSIlpo Järvinen /* 1811d1a54c6aSIlpo Järvinen * Count the retransmission made on RTO correctly (only when 1812d1a54c6aSIlpo Järvinen * waiting for the first ACK and did not get it)... 
1813d1a54c6aSIlpo Järvinen */ 1814d1a54c6aSIlpo Järvinen if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { 18150a9f2a46SIlpo Järvinen /* For some reason this R-bit might get cleared? */ 18160a9f2a46SIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1817d1a54c6aSIlpo Järvinen tp->retrans_out += tcp_skb_pcount(skb); 1818d1a54c6aSIlpo Järvinen /* ...enter this if branch just for the first segment */ 1819d1a54c6aSIlpo Järvinen flag |= FLAG_DATA_ACKED; 1820d1a54c6aSIlpo Järvinen } else { 1821009a2e3eSIlpo Järvinen if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1822009a2e3eSIlpo Järvinen tp->undo_marker = 0; 182323aeeec3SIlpo Järvinen TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1824d1a54c6aSIlpo Järvinen } 18251da177e4SLinus Torvalds 18269bff40fdSIlpo Järvinen /* Don't lost mark skbs that were fwd transmitted after RTO */ 18279bff40fdSIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) && 18289bff40fdSIlpo Järvinen !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) { 18291da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 18301da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 18311da177e4SLinus Torvalds } 18321da177e4SLinus Torvalds } 1833005903bcSIlpo Järvinen tcp_verify_left_out(tp); 18341da177e4SLinus Torvalds 183595c4922bSIlpo Järvinen tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; 18361da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 18371da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 18381da177e4SLinus Torvalds tp->frto_counter = 0; 183916e90681SIlpo Järvinen tp->bytes_acked = 0; 18401da177e4SLinus Torvalds 18411da177e4SLinus Torvalds tp->reordering = min_t(unsigned int, tp->reordering, 18421da177e4SLinus Torvalds sysctl_tcp_reordering); 18436687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 18441da177e4SLinus Torvalds tp->high_seq = tp->frto_highmark; 18451da177e4SLinus Torvalds TCP_ECN_queue_cwr(tp); 18466a438bbeSStephen Hemminger 1847b7689205SIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 18481da177e4SLinus Torvalds } 18491da177e4SLinus Torvalds 18504cd82999SIlpo Järvinen static void tcp_clear_retrans_partial(struct tcp_sock *tp) 18511da177e4SLinus Torvalds { 18521da177e4SLinus Torvalds tp->retrans_out = 0; 18531da177e4SLinus Torvalds tp->lost_out = 0; 18541da177e4SLinus Torvalds 18551da177e4SLinus Torvalds tp->undo_marker = 0; 18561da177e4SLinus Torvalds tp->undo_retrans = 0; 18571da177e4SLinus Torvalds } 18581da177e4SLinus Torvalds 18594cd82999SIlpo Järvinen void tcp_clear_retrans(struct tcp_sock *tp) 18604cd82999SIlpo Järvinen { 18614cd82999SIlpo Järvinen tcp_clear_retrans_partial(tp); 18624cd82999SIlpo Järvinen 18634cd82999SIlpo Järvinen tp->fackets_out = 0; 18644cd82999SIlpo Järvinen tp->sacked_out = 0; 18654cd82999SIlpo Järvinen } 18664cd82999SIlpo Järvinen 18671da177e4SLinus Torvalds /* Enter Loss state. If "how" is not zero, forget all SACK information 18681da177e4SLinus Torvalds * and reset tags completely, otherwise preserve SACKs. If receiver 18691da177e4SLinus Torvalds * dropped its ofo queue, we will know this due to reneging detection. 
18701da177e4SLinus Torvalds */ 18711da177e4SLinus Torvalds void tcp_enter_loss(struct sock *sk, int how) 18721da177e4SLinus Torvalds { 18736687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 18741da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 18751da177e4SLinus Torvalds struct sk_buff *skb; 18761da177e4SLinus Torvalds 18771da177e4SLinus Torvalds /* Reduce ssthresh if it has not yet been made inside this window. */ 18786687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 18796687e988SArnaldo Carvalho de Melo (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 18806687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 18816687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 18826687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_LOSS); 18831da177e4SLinus Torvalds } 18841da177e4SLinus Torvalds tp->snd_cwnd = 1; 18851da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 18861da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 18871da177e4SLinus Torvalds 18889772efb9SStephen Hemminger tp->bytes_acked = 0; 18894cd82999SIlpo Järvinen tcp_clear_retrans_partial(tp); 18904cd82999SIlpo Järvinen 18914cd82999SIlpo Järvinen if (tcp_is_reno(tp)) 18924cd82999SIlpo Järvinen tcp_reset_reno_sack(tp); 18931da177e4SLinus Torvalds 1894b7689205SIlpo Järvinen if (!how) { 18951da177e4SLinus Torvalds /* Push undo marker, if it was plain RTO and nothing 18961da177e4SLinus Torvalds * was retransmitted. */ 18971da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 1898b7689205SIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 1899b7689205SIlpo Järvinen } else { 19004cd82999SIlpo Järvinen tp->sacked_out = 0; 19014cd82999SIlpo Järvinen tp->fackets_out = 0; 1902b7689205SIlpo Järvinen tcp_clear_all_retrans_hints(tp); 1903b7689205SIlpo Järvinen } 19041da177e4SLinus Torvalds 1905fe067e8aSDavid S. Miller tcp_for_write_queue(skb, sk) { 1906fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 1907fe067e8aSDavid S. Miller break; 19084cd82999SIlpo Järvinen 19091da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 19101da177e4SLinus Torvalds tp->undo_marker = 0; 19111da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 19121da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 19131da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 19141da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 19151da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 19161da177e4SLinus Torvalds } 19171da177e4SLinus Torvalds } 1918005903bcSIlpo Järvinen tcp_verify_left_out(tp); 19191da177e4SLinus Torvalds 19201da177e4SLinus Torvalds tp->reordering = min_t(unsigned int, tp->reordering, 19211da177e4SLinus Torvalds sysctl_tcp_reordering); 19226687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 19231da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 19241da177e4SLinus Torvalds TCP_ECN_queue_cwr(tp); 1925564262c1SRyousei Takano /* Abort F-RTO algorithm if one is in progress */ 1926580e572aSIlpo Järvinen tp->frto_counter = 0; 19271da177e4SLinus Torvalds } 19281da177e4SLinus Torvalds 1929cadbd031SIlpo Järvinen /* If ACK arrived pointing to a remembered SACK, it means that our 1930cadbd031SIlpo Järvinen * remembered SACKs do not reflect real state of receiver i.e. 19311da177e4SLinus Torvalds * receiver _host_ is heavily congested (or buggy). 
1932cadbd031SIlpo Järvinen * 19331da177e4SLinus Torvalds * Do processing similar to RTO timeout. 19341da177e4SLinus Torvalds */ 1935cadbd031SIlpo Järvinen static int tcp_check_sack_reneging(struct sock *sk, int flag) 1936cadbd031SIlpo Järvinen { 1937cadbd031SIlpo Järvinen if (flag & FLAG_SACK_RENEGING) { 19386687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 19391da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); 19401da177e4SLinus Torvalds 19411da177e4SLinus Torvalds tcp_enter_loss(sk, 1); 19426687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits++; 1943fe067e8aSDavid S. Miller tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 1944463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 19456687e988SArnaldo Carvalho de Melo icsk->icsk_rto, TCP_RTO_MAX); 19461da177e4SLinus Torvalds return 1; 19471da177e4SLinus Torvalds } 19481da177e4SLinus Torvalds return 0; 19491da177e4SLinus Torvalds } 19501da177e4SLinus Torvalds 19511da177e4SLinus Torvalds static inline int tcp_fackets_out(struct tcp_sock *tp) 19521da177e4SLinus Torvalds { 1953e60402d0SIlpo Järvinen return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 19541da177e4SLinus Torvalds } 19551da177e4SLinus Torvalds 195685cc391cSIlpo Järvinen /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs 195785cc391cSIlpo Järvinen * counter when SACK is enabled (without SACK, sacked_out is used for 195885cc391cSIlpo Järvinen * that purpose). 195985cc391cSIlpo Järvinen * 196085cc391cSIlpo Järvinen * Instead, with FACK TCP uses fackets_out that includes both SACKed 196185cc391cSIlpo Järvinen * segments up to the highest received SACK block so far and holes in 196285cc391cSIlpo Järvinen * between them. 196385cc391cSIlpo Järvinen * 196485cc391cSIlpo Järvinen * With reordering, holes may still be in flight, so RFC3517 recovery 196585cc391cSIlpo Järvinen * uses pure sacked_out (total number of SACKed segments) even though 196685cc391cSIlpo Järvinen * it violates the RFC that uses duplicate ACKs, often these are equal 196785cc391cSIlpo Järvinen * but when e.g. out-of-window ACKs or packet duplication occurs, 196885cc391cSIlpo Järvinen * they differ. Since neither occurs due to loss, TCP should really 196985cc391cSIlpo Järvinen * ignore them. 197085cc391cSIlpo Järvinen */ 197185cc391cSIlpo Järvinen static inline int tcp_dupack_heurestics(struct tcp_sock *tp) 197285cc391cSIlpo Järvinen { 197385cc391cSIlpo Järvinen return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 197485cc391cSIlpo Järvinen } 197585cc391cSIlpo Järvinen 1976463c84b9SArnaldo Carvalho de Melo static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 19771da177e4SLinus Torvalds { 1978463c84b9SArnaldo Carvalho de Melo return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 19791da177e4SLinus Torvalds } 19801da177e4SLinus Torvalds 19819e412ba7SIlpo Järvinen static inline int tcp_head_timedout(struct sock *sk) 19821da177e4SLinus Torvalds { 19839e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 19849e412ba7SIlpo Järvinen 19851da177e4SLinus Torvalds return tp->packets_out && 1986fe067e8aSDavid S. Miller tcp_skb_timedout(sk, tcp_write_queue_head(sk)); 19871da177e4SLinus Torvalds } 19881da177e4SLinus Torvalds 19891da177e4SLinus Torvalds /* Linux NewReno/SACK/FACK/ECN state machine. 
19901da177e4SLinus Torvalds * -------------------------------------- 19911da177e4SLinus Torvalds * 19921da177e4SLinus Torvalds * "Open" Normal state, no dubious events, fast path. 19931da177e4SLinus Torvalds * "Disorder" In all the respects it is "Open", 19941da177e4SLinus Torvalds * but requires a bit more attention. It is entered when 19951da177e4SLinus Torvalds * we see some SACKs or dupacks. It is split of "Open" 19961da177e4SLinus Torvalds * mainly to move some processing from fast path to slow one. 19971da177e4SLinus Torvalds * "CWR" CWND was reduced due to some Congestion Notification event. 19981da177e4SLinus Torvalds * It can be ECN, ICMP source quench, local device congestion. 19991da177e4SLinus Torvalds * "Recovery" CWND was reduced, we are fast-retransmitting. 20001da177e4SLinus Torvalds * "Loss" CWND was reduced due to RTO timeout or SACK reneging. 20011da177e4SLinus Torvalds * 20021da177e4SLinus Torvalds * tcp_fastretrans_alert() is entered: 20031da177e4SLinus Torvalds * - each incoming ACK, if state is not "Open" 20041da177e4SLinus Torvalds * - when arrived ACK is unusual, namely: 20051da177e4SLinus Torvalds * * SACK 20061da177e4SLinus Torvalds * * Duplicate ACK. 20071da177e4SLinus Torvalds * * ECN ECE. 20081da177e4SLinus Torvalds * 20091da177e4SLinus Torvalds * Counting packets in flight is pretty simple. 20101da177e4SLinus Torvalds * 20111da177e4SLinus Torvalds * in_flight = packets_out - left_out + retrans_out 20121da177e4SLinus Torvalds * 20131da177e4SLinus Torvalds * packets_out is SND.NXT-SND.UNA counted in packets. 20141da177e4SLinus Torvalds * 20151da177e4SLinus Torvalds * retrans_out is number of retransmitted segments. 20161da177e4SLinus Torvalds * 20171da177e4SLinus Torvalds * left_out is number of segments left network, but not ACKed yet. 20181da177e4SLinus Torvalds * 20191da177e4SLinus Torvalds * left_out = sacked_out + lost_out 20201da177e4SLinus Torvalds * 20211da177e4SLinus Torvalds * sacked_out: Packets, which arrived to receiver out of order 20221da177e4SLinus Torvalds * and hence not ACKed. With SACKs this number is simply 20231da177e4SLinus Torvalds * amount of SACKed data. Even without SACKs 20241da177e4SLinus Torvalds * it is easy to give pretty reliable estimate of this number, 20251da177e4SLinus Torvalds * counting duplicate ACKs. 20261da177e4SLinus Torvalds * 20271da177e4SLinus Torvalds * lost_out: Packets lost by network. TCP has no explicit 20281da177e4SLinus Torvalds * "loss notification" feedback from network (for now). 20291da177e4SLinus Torvalds * It means that this number can be only _guessed_. 20301da177e4SLinus Torvalds * Actually, it is the heuristics to predict lossage that 20311da177e4SLinus Torvalds * distinguishes different algorithms. 20321da177e4SLinus Torvalds * 20331da177e4SLinus Torvalds * F.e. after RTO, when all the queue is considered as lost, 20341da177e4SLinus Torvalds * lost_out = packets_out and in_flight = retrans_out. 20351da177e4SLinus Torvalds * 20361da177e4SLinus Torvalds * Essentially, we have now two algorithms counting 20371da177e4SLinus Torvalds * lost packets. 20381da177e4SLinus Torvalds * 20391da177e4SLinus Torvalds * FACK: It is the simplest heuristics. As soon as we decided 20401da177e4SLinus Torvalds * that something is lost, we decide that _all_ not SACKed 20411da177e4SLinus Torvalds * packets until the most forward SACK are lost. I.e. 20421da177e4SLinus Torvalds * lost_out = fackets_out - sacked_out and left_out = fackets_out. 
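 * As a worked example (illustrative numbers only): with packets_out = 10 and
 * the most forward SACK covering the 8th outstanding segment, fackets_out = 8;
 * if 3 of those segments are SACKed, sacked_out = 3, so FACK estimates
 * lost_out = 8 - 3 = 5 and left_out = 8.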
20431da177e4SLinus Torvalds * It is absolutely correct estimate, if network does not reorder 20441da177e4SLinus Torvalds * packets. And it loses any connection to reality when reordering 20451da177e4SLinus Torvalds * takes place. We use FACK by default until reordering 20461da177e4SLinus Torvalds * is suspected on the path to this destination. 20471da177e4SLinus Torvalds * 20481da177e4SLinus Torvalds * NewReno: when Recovery is entered, we assume that one segment 20491da177e4SLinus Torvalds * is lost (classic Reno). While we are in Recovery and 20501da177e4SLinus Torvalds * a partial ACK arrives, we assume that one more packet 20511da177e4SLinus Torvalds * is lost (NewReno). This heuristics are the same in NewReno 20521da177e4SLinus Torvalds * and SACK. 20531da177e4SLinus Torvalds * 20541da177e4SLinus Torvalds * Imagine, that's all! Forget about all this shamanism about CWND inflation 20551da177e4SLinus Torvalds * deflation etc. CWND is real congestion window, never inflated, changes 20561da177e4SLinus Torvalds * only according to classic VJ rules. 20571da177e4SLinus Torvalds * 20581da177e4SLinus Torvalds * Really tricky (and requiring careful tuning) part of algorithm 20591da177e4SLinus Torvalds * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). 20601da177e4SLinus Torvalds * The first determines the moment _when_ we should reduce CWND and, 20611da177e4SLinus Torvalds * hence, slow down forward transmission. In fact, it determines the moment 20621da177e4SLinus Torvalds * when we decide that hole is caused by loss, rather than by a reorder. 20631da177e4SLinus Torvalds * 20641da177e4SLinus Torvalds * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill 20651da177e4SLinus Torvalds * holes, caused by lost packets. 20661da177e4SLinus Torvalds * 20671da177e4SLinus Torvalds * And the most logically complicated part of algorithm is undo 20681da177e4SLinus Torvalds * heuristics. We detect false retransmits due to both too early 20691da177e4SLinus Torvalds * fast retransmit (reordering) and underestimated RTO, analyzing 20701da177e4SLinus Torvalds * timestamps and D-SACKs. When we detect that some segments were 20711da177e4SLinus Torvalds * retransmitted by mistake and CWND reduction was wrong, we undo 20721da177e4SLinus Torvalds * window reduction and abort recovery phase. This logic is hidden 20731da177e4SLinus Torvalds * inside several functions named tcp_try_undo_<something>. 20741da177e4SLinus Torvalds */ 20751da177e4SLinus Torvalds 20761da177e4SLinus Torvalds /* This function decides, when we should leave Disordered state 20771da177e4SLinus Torvalds * and enter Recovery phase, reducing congestion window. 20781da177e4SLinus Torvalds * 20791da177e4SLinus Torvalds * Main question: may we further continue forward transmission 20801da177e4SLinus Torvalds * with the same cwnd? 20811da177e4SLinus Torvalds */ 20829e412ba7SIlpo Järvinen static int tcp_time_to_recover(struct sock *sk) 20831da177e4SLinus Torvalds { 20849e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 20851da177e4SLinus Torvalds __u32 packets_out; 20861da177e4SLinus Torvalds 2087564262c1SRyousei Takano /* Do not perform any recovery during F-RTO algorithm */ 208852c63f1eSIlpo Järvinen if (tp->frto_counter) 208952c63f1eSIlpo Järvinen return 0; 209052c63f1eSIlpo Järvinen 20911da177e4SLinus Torvalds /* Trick#1: The loss is proven. 
*/ 20921da177e4SLinus Torvalds if (tp->lost_out) 20931da177e4SLinus Torvalds return 1; 20941da177e4SLinus Torvalds 20951da177e4SLinus Torvalds /* Not-A-Trick#2 : Classic rule... */ 209685cc391cSIlpo Järvinen if (tcp_dupack_heurestics(tp) > tp->reordering) 20971da177e4SLinus Torvalds return 1; 20981da177e4SLinus Torvalds 20991da177e4SLinus Torvalds /* Trick#3 : when we use RFC2988 timer restart, fast 21001da177e4SLinus Torvalds * retransmit can be triggered by timeout of queue head. 21011da177e4SLinus Torvalds */ 210285cc391cSIlpo Järvinen if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 21031da177e4SLinus Torvalds return 1; 21041da177e4SLinus Torvalds 21051da177e4SLinus Torvalds /* Trick#4: It is still not OK... But will it be useful to delay 21061da177e4SLinus Torvalds * recovery more? 21071da177e4SLinus Torvalds */ 21081da177e4SLinus Torvalds packets_out = tp->packets_out; 21091da177e4SLinus Torvalds if (packets_out <= tp->reordering && 21101da177e4SLinus Torvalds tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 21119e412ba7SIlpo Järvinen !tcp_may_send_now(sk)) { 21121da177e4SLinus Torvalds /* We have nothing to send. This connection is limited 21131da177e4SLinus Torvalds * either by receiver window or by application. 21141da177e4SLinus Torvalds */ 21151da177e4SLinus Torvalds return 1; 21161da177e4SLinus Torvalds } 21171da177e4SLinus Torvalds 21181da177e4SLinus Torvalds return 0; 21191da177e4SLinus Torvalds } 21201da177e4SLinus Torvalds 2121d8f4f223SIlpo Järvinen /* RFC: This is from the original, I doubt that this is necessary at all: 2122d8f4f223SIlpo Järvinen * clear xmit_retrans hint if seq of this skb is beyond hint. How could we 2123d8f4f223SIlpo Järvinen * retransmitted past LOST markings in the first place? I'm not fully sure 2124d8f4f223SIlpo Järvinen * about undo and end of connection cases, which can cause R without L? 2125d8f4f223SIlpo Järvinen */ 2126056834d9SIlpo Järvinen static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) 2127d8f4f223SIlpo Järvinen { 2128d8f4f223SIlpo Järvinen if ((tp->retransmit_skb_hint != NULL) && 2129d8f4f223SIlpo Järvinen before(TCP_SKB_CB(skb)->seq, 2130d8f4f223SIlpo Järvinen TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 213119b2b486SIlpo Järvinen tp->retransmit_skb_hint = NULL; 2132d8f4f223SIlpo Järvinen } 2133d8f4f223SIlpo Järvinen 213485cc391cSIlpo Järvinen /* Mark head of queue up as lost. With RFC3517 SACK, the packets is 213585cc391cSIlpo Järvinen * is against sacked "cnt", otherwise it's against facked "cnt" 213685cc391cSIlpo Järvinen */ 213785cc391cSIlpo Järvinen static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit) 21381da177e4SLinus Torvalds { 21399e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 21401da177e4SLinus Torvalds struct sk_buff *skb; 21416a438bbeSStephen Hemminger int cnt; 21421da177e4SLinus Torvalds 21436a438bbeSStephen Hemminger BUG_TRAP(packets <= tp->packets_out); 21446a438bbeSStephen Hemminger if (tp->lost_skb_hint) { 21456a438bbeSStephen Hemminger skb = tp->lost_skb_hint; 21466a438bbeSStephen Hemminger cnt = tp->lost_cnt_hint; 21476a438bbeSStephen Hemminger } else { 2148fe067e8aSDavid S. Miller skb = tcp_write_queue_head(sk); 21496a438bbeSStephen Hemminger cnt = 0; 21506a438bbeSStephen Hemminger } 21511da177e4SLinus Torvalds 2152fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 2153fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 2154fe067e8aSDavid S. 
Miller break; 21556a438bbeSStephen Hemminger /* TODO: do this better */ 21566a438bbeSStephen Hemminger /* this is not the most efficient way to do this... */ 21576a438bbeSStephen Hemminger tp->lost_skb_hint = skb; 21586a438bbeSStephen Hemminger tp->lost_cnt_hint = cnt; 215985cc391cSIlpo Järvinen 2160ad1984e8SIlpo Järvinen if (tcp_is_fack(tp) || tcp_is_reno(tp) || 216185cc391cSIlpo Järvinen (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 21626a438bbeSStephen Hemminger cnt += tcp_skb_pcount(skb); 216385cc391cSIlpo Järvinen 216485cc391cSIlpo Järvinen if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) || 216585cc391cSIlpo Järvinen after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) 21661da177e4SLinus Torvalds break; 21673eec0047SIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 21681da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 21691da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 2170d8f4f223SIlpo Järvinen tcp_verify_retransmit_hint(tp, skb); 21711da177e4SLinus Torvalds } 21721da177e4SLinus Torvalds } 2173005903bcSIlpo Järvinen tcp_verify_left_out(tp); 21741da177e4SLinus Torvalds } 21751da177e4SLinus Torvalds 21761da177e4SLinus Torvalds /* Account newly detected lost packet(s) */ 21771da177e4SLinus Torvalds 217885cc391cSIlpo Järvinen static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) 21791da177e4SLinus Torvalds { 21809e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 21819e412ba7SIlpo Järvinen 218285cc391cSIlpo Järvinen if (tcp_is_reno(tp)) { 218385cc391cSIlpo Järvinen tcp_mark_head_lost(sk, 1, fast_rexmit); 218485cc391cSIlpo Järvinen } else if (tcp_is_fack(tp)) { 21851da177e4SLinus Torvalds int lost = tp->fackets_out - tp->reordering; 21861da177e4SLinus Torvalds if (lost <= 0) 21871da177e4SLinus Torvalds lost = 1; 218885cc391cSIlpo Järvinen tcp_mark_head_lost(sk, lost, fast_rexmit); 21891da177e4SLinus Torvalds } else { 219085cc391cSIlpo Järvinen int sacked_upto = tp->sacked_out - tp->reordering; 219185cc391cSIlpo Järvinen if (sacked_upto < 0) 219285cc391cSIlpo Järvinen sacked_upto = 0; 219385cc391cSIlpo Järvinen tcp_mark_head_lost(sk, sacked_upto, fast_rexmit); 21941da177e4SLinus Torvalds } 21951da177e4SLinus Torvalds 21961da177e4SLinus Torvalds /* New heuristics: it is possible only after we switched 21971da177e4SLinus Torvalds * to restart timer each time when something is ACKed. 21981da177e4SLinus Torvalds * Hence, we can detect timed out packets during fast 21991da177e4SLinus Torvalds * retransmit without falling to slow start. 22001da177e4SLinus Torvalds */ 220185cc391cSIlpo Järvinen if (tcp_is_fack(tp) && tcp_head_timedout(sk)) { 22021da177e4SLinus Torvalds struct sk_buff *skb; 22031da177e4SLinus Torvalds 22046a438bbeSStephen Hemminger skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint 2205fe067e8aSDavid S. Miller : tcp_write_queue_head(sk); 22066a438bbeSStephen Hemminger 2207fe067e8aSDavid S. Miller tcp_for_write_queue_from(skb, sk) { 2208fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 2209fe067e8aSDavid S. 
Miller break; 22106a438bbeSStephen Hemminger if (!tcp_skb_timedout(sk, skb)) 22116a438bbeSStephen Hemminger break; 22126a438bbeSStephen Hemminger 2213261ab365SIlpo Järvinen if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 22141da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 22151da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 2216d8f4f223SIlpo Järvinen tcp_verify_retransmit_hint(tp, skb); 22171da177e4SLinus Torvalds } 22181da177e4SLinus Torvalds } 22196a438bbeSStephen Hemminger 22206a438bbeSStephen Hemminger tp->scoreboard_skb_hint = skb; 22216a438bbeSStephen Hemminger 2222005903bcSIlpo Järvinen tcp_verify_left_out(tp); 22231da177e4SLinus Torvalds } 22241da177e4SLinus Torvalds } 22251da177e4SLinus Torvalds 22261da177e4SLinus Torvalds /* CWND moderation, preventing bursts due to too big ACKs 22271da177e4SLinus Torvalds * in dubious situations. 22281da177e4SLinus Torvalds */ 22291da177e4SLinus Torvalds static inline void tcp_moderate_cwnd(struct tcp_sock *tp) 22301da177e4SLinus Torvalds { 22311da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, 22321da177e4SLinus Torvalds tcp_packets_in_flight(tp) + tcp_max_burst(tp)); 22331da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 22341da177e4SLinus Torvalds } 22351da177e4SLinus Torvalds 223672dc5b92SStephen Hemminger /* Lower bound on congestion window is slow start threshold 223772dc5b92SStephen Hemminger * unless congestion avoidance choice decides to overide it. 223872dc5b92SStephen Hemminger */ 223972dc5b92SStephen Hemminger static inline u32 tcp_cwnd_min(const struct sock *sk) 224072dc5b92SStephen Hemminger { 224172dc5b92SStephen Hemminger const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 224272dc5b92SStephen Hemminger 224372dc5b92SStephen Hemminger return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; 224472dc5b92SStephen Hemminger } 224572dc5b92SStephen Hemminger 22461da177e4SLinus Torvalds /* Decrease cwnd each second ack. */ 22471e757f99SIlpo Järvinen static void tcp_cwnd_down(struct sock *sk, int flag) 22481da177e4SLinus Torvalds { 22496687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 22501da177e4SLinus Torvalds int decr = tp->snd_cwnd_cnt + 1; 22511da177e4SLinus Torvalds 225249ff4bb4SIlpo Järvinen if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || 2253e60402d0SIlpo Järvinen (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { 22541da177e4SLinus Torvalds tp->snd_cwnd_cnt = decr & 1; 22551da177e4SLinus Torvalds decr >>= 1; 22561da177e4SLinus Torvalds 225772dc5b92SStephen Hemminger if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) 22581da177e4SLinus Torvalds tp->snd_cwnd -= decr; 22591da177e4SLinus Torvalds 22601da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 22611da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 22621da177e4SLinus Torvalds } 22631e757f99SIlpo Järvinen } 22641da177e4SLinus Torvalds 22651da177e4SLinus Torvalds /* Nothing was retransmitted or returned timestamp is less 22661da177e4SLinus Torvalds * than timestamp of the first retransmission. 
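 *
 * The timestamps are compared on the 32-bit timestamp clock, hence the
 * difference below is evaluated as signed (__s32) so that the test stays
 * correct across wraparound: f.e. rcv_tsecr = 10 with retrans_stamp =
 * 0xfffffff0 gives a small positive difference, i.e. the echoed value is
 * treated as newer than the first retransmission.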
22671da177e4SLinus Torvalds */ 22681da177e4SLinus Torvalds static inline int tcp_packet_delayed(struct tcp_sock *tp) 22691da177e4SLinus Torvalds { 22701da177e4SLinus Torvalds return !tp->retrans_stamp || 22711da177e4SLinus Torvalds (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 22721da177e4SLinus Torvalds (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0); 22731da177e4SLinus Torvalds } 22741da177e4SLinus Torvalds 22751da177e4SLinus Torvalds /* Undo procedures. */ 22761da177e4SLinus Torvalds 22771da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 1 22789e412ba7SIlpo Järvinen static void DBGUNDO(struct sock *sk, const char *msg) 22791da177e4SLinus Torvalds { 22809e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 22811da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 22829e412ba7SIlpo Järvinen 22831da177e4SLinus Torvalds printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", 22841da177e4SLinus Torvalds msg, 22851da177e4SLinus Torvalds NIPQUAD(inet->daddr), ntohs(inet->dport), 228683ae4088SIlpo Järvinen tp->snd_cwnd, tcp_left_out(tp), 22871da177e4SLinus Torvalds tp->snd_ssthresh, tp->prior_ssthresh, 22881da177e4SLinus Torvalds tp->packets_out); 22891da177e4SLinus Torvalds } 22901da177e4SLinus Torvalds #else 22911da177e4SLinus Torvalds #define DBGUNDO(x...) do { } while (0) 22921da177e4SLinus Torvalds #endif 22931da177e4SLinus Torvalds 22946687e988SArnaldo Carvalho de Melo static void tcp_undo_cwr(struct sock *sk, const int undo) 22951da177e4SLinus Torvalds { 22966687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 22976687e988SArnaldo Carvalho de Melo 22981da177e4SLinus Torvalds if (tp->prior_ssthresh) { 22996687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 23006687e988SArnaldo Carvalho de Melo 23016687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_ops->undo_cwnd) 23026687e988SArnaldo Carvalho de Melo tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 23031da177e4SLinus Torvalds else 23041da177e4SLinus Torvalds tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 23051da177e4SLinus Torvalds 23061da177e4SLinus Torvalds if (undo && tp->prior_ssthresh > tp->snd_ssthresh) { 23071da177e4SLinus Torvalds tp->snd_ssthresh = tp->prior_ssthresh; 23081da177e4SLinus Torvalds TCP_ECN_withdraw_cwr(tp); 23091da177e4SLinus Torvalds } 23101da177e4SLinus Torvalds } else { 23111da177e4SLinus Torvalds tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 23121da177e4SLinus Torvalds } 23131da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 23141da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 23156a438bbeSStephen Hemminger 23166a438bbeSStephen Hemminger /* There is something screwy going on with the retrans hints after 23176a438bbeSStephen Hemminger an undo */ 23185af4ec23SIlpo Järvinen tcp_clear_all_retrans_hints(tp); 23191da177e4SLinus Torvalds } 23201da177e4SLinus Torvalds 23211da177e4SLinus Torvalds static inline int tcp_may_undo(struct tcp_sock *tp) 23221da177e4SLinus Torvalds { 2323056834d9SIlpo Järvinen return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 23241da177e4SLinus Torvalds } 23251da177e4SLinus Torvalds 23261da177e4SLinus Torvalds /* People celebrate: "We love our President!" */ 23279e412ba7SIlpo Järvinen static int tcp_try_undo_recovery(struct sock *sk) 23281da177e4SLinus Torvalds { 23299e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 23309e412ba7SIlpo Järvinen 23311da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 23321da177e4SLinus Torvalds /* Happy end! 
We did not retransmit anything 23331da177e4SLinus Torvalds * or our original transmission succeeded. 23341da177e4SLinus Torvalds */ 23359e412ba7SIlpo Järvinen DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 23366687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 23376687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 23381da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 23391da177e4SLinus Torvalds else 23401da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); 23411da177e4SLinus Torvalds tp->undo_marker = 0; 23421da177e4SLinus Torvalds } 2343e60402d0SIlpo Järvinen if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 23441da177e4SLinus Torvalds /* Hold old state until something *above* high_seq 23451da177e4SLinus Torvalds * is ACKed. For Reno it is MUST to prevent false 23461da177e4SLinus Torvalds * fast retransmits (RFC2582). SACK TCP is safe. */ 23471da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 23481da177e4SLinus Torvalds return 1; 23491da177e4SLinus Torvalds } 23506687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 23511da177e4SLinus Torvalds return 0; 23521da177e4SLinus Torvalds } 23531da177e4SLinus Torvalds 23541da177e4SLinus Torvalds /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 23559e412ba7SIlpo Järvinen static void tcp_try_undo_dsack(struct sock *sk) 23561da177e4SLinus Torvalds { 23579e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 23589e412ba7SIlpo Järvinen 23591da177e4SLinus Torvalds if (tp->undo_marker && !tp->undo_retrans) { 23609e412ba7SIlpo Järvinen DBGUNDO(sk, "D-SACK"); 23616687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 23621da177e4SLinus Torvalds tp->undo_marker = 0; 23631da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds } 23661da177e4SLinus Torvalds 23671da177e4SLinus Torvalds /* Undo during fast recovery after partial ACK. */ 23681da177e4SLinus Torvalds 23699e412ba7SIlpo Järvinen static int tcp_try_undo_partial(struct sock *sk, int acked) 23701da177e4SLinus Torvalds { 23719e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 23721da177e4SLinus Torvalds /* Partial ACK arrived. Force Hoe's retransmit. */ 237385cc391cSIlpo Järvinen int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); 23741da177e4SLinus Torvalds 23751da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 23761da177e4SLinus Torvalds /* Plain luck! Hole if filled with delayed 23771da177e4SLinus Torvalds * packet, rather than with a retransmit. 23781da177e4SLinus Torvalds */ 23791da177e4SLinus Torvalds if (tp->retrans_out == 0) 23801da177e4SLinus Torvalds tp->retrans_stamp = 0; 23811da177e4SLinus Torvalds 23826687e988SArnaldo Carvalho de Melo tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 23831da177e4SLinus Torvalds 23849e412ba7SIlpo Järvinen DBGUNDO(sk, "Hoe"); 23856687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 0); 23861da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); 23871da177e4SLinus Torvalds 23881da177e4SLinus Torvalds /* So... Do not make Hoe's retransmit yet. 23891da177e4SLinus Torvalds * If the first packet was delayed, the rest 23901da177e4SLinus Torvalds * ones are most probably delayed as well. 
23911da177e4SLinus Torvalds */ 23921da177e4SLinus Torvalds failed = 0; 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds return failed; 23951da177e4SLinus Torvalds } 23961da177e4SLinus Torvalds 23971da177e4SLinus Torvalds /* Undo during loss recovery after partial ACK. */ 23989e412ba7SIlpo Järvinen static int tcp_try_undo_loss(struct sock *sk) 23991da177e4SLinus Torvalds { 24009e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 24019e412ba7SIlpo Järvinen 24021da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 24031da177e4SLinus Torvalds struct sk_buff *skb; 2404fe067e8aSDavid S. Miller tcp_for_write_queue(skb, sk) { 2405fe067e8aSDavid S. Miller if (skb == tcp_send_head(sk)) 2406fe067e8aSDavid S. Miller break; 24071da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 24081da177e4SLinus Torvalds } 24096a438bbeSStephen Hemminger 24105af4ec23SIlpo Järvinen tcp_clear_all_retrans_hints(tp); 24116a438bbeSStephen Hemminger 24129e412ba7SIlpo Järvinen DBGUNDO(sk, "partial loss"); 24131da177e4SLinus Torvalds tp->lost_out = 0; 24146687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 24151da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 2416463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 24171da177e4SLinus Torvalds tp->undo_marker = 0; 2418e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 24196687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 24201da177e4SLinus Torvalds return 1; 24211da177e4SLinus Torvalds } 24221da177e4SLinus Torvalds return 0; 24231da177e4SLinus Torvalds } 24241da177e4SLinus Torvalds 24256687e988SArnaldo Carvalho de Melo static inline void tcp_complete_cwr(struct sock *sk) 24261da177e4SLinus Torvalds { 24276687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 24281da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 24291da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 24306687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 24311da177e4SLinus Torvalds } 24321da177e4SLinus Torvalds 24339e412ba7SIlpo Järvinen static void tcp_try_to_open(struct sock *sk, int flag) 24341da177e4SLinus Torvalds { 24359e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 24369e412ba7SIlpo Järvinen 243786426c22SIlpo Järvinen tcp_verify_left_out(tp); 243886426c22SIlpo Järvinen 24391da177e4SLinus Torvalds if (tp->retrans_out == 0) 24401da177e4SLinus Torvalds tp->retrans_stamp = 0; 24411da177e4SLinus Torvalds 24421da177e4SLinus Torvalds if (flag & FLAG_ECE) 24433cfe3baaSIlpo Järvinen tcp_enter_cwr(sk, 1); 24441da177e4SLinus Torvalds 24456687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 24461da177e4SLinus Torvalds int state = TCP_CA_Open; 24471da177e4SLinus Torvalds 2448d02596e3SIlpo Järvinen if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) 24491da177e4SLinus Torvalds state = TCP_CA_Disorder; 24501da177e4SLinus Torvalds 24516687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state != state) { 24526687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, state); 24531da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 24541da177e4SLinus Torvalds } 24551da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 24561da177e4SLinus Torvalds } else { 24571e757f99SIlpo Järvinen tcp_cwnd_down(sk, flag); 24581da177e4SLinus Torvalds } 24591da177e4SLinus Torvalds } 24601da177e4SLinus Torvalds 24615d424d5aSJohn Heffner static void tcp_mtup_probe_failed(struct sock *sk) 24625d424d5aSJohn Heffner { 24635d424d5aSJohn Heffner struct 
inet_connection_sock *icsk = inet_csk(sk); 24645d424d5aSJohn Heffner 24655d424d5aSJohn Heffner icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 24665d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 24675d424d5aSJohn Heffner } 24685d424d5aSJohn Heffner 24695d424d5aSJohn Heffner static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb) 24705d424d5aSJohn Heffner { 24715d424d5aSJohn Heffner struct tcp_sock *tp = tcp_sk(sk); 24725d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 24735d424d5aSJohn Heffner 24745d424d5aSJohn Heffner /* FIXME: breaks with very large cwnd */ 24755d424d5aSJohn Heffner tp->prior_ssthresh = tcp_current_ssthresh(sk); 24765d424d5aSJohn Heffner tp->snd_cwnd = tp->snd_cwnd * 24775d424d5aSJohn Heffner tcp_mss_to_mtu(sk, tp->mss_cache) / 24785d424d5aSJohn Heffner icsk->icsk_mtup.probe_size; 24795d424d5aSJohn Heffner tp->snd_cwnd_cnt = 0; 24805d424d5aSJohn Heffner tp->snd_cwnd_stamp = tcp_time_stamp; 24815d424d5aSJohn Heffner tp->rcv_ssthresh = tcp_current_ssthresh(sk); 24825d424d5aSJohn Heffner 24835d424d5aSJohn Heffner icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 24845d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 24855d424d5aSJohn Heffner tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 24865d424d5aSJohn Heffner } 24875d424d5aSJohn Heffner 24881da177e4SLinus Torvalds /* Process an event, which can update packets-in-flight not trivially. 24891da177e4SLinus Torvalds * Main goal of this function is to calculate new estimate for left_out, 24901da177e4SLinus Torvalds * taking into account both packets sitting in receiver's buffer and 24911da177e4SLinus Torvalds * packets lost by network. 24921da177e4SLinus Torvalds * 24931da177e4SLinus Torvalds * Besides that it does CWND reduction, when packet loss is detected 24941da177e4SLinus Torvalds * and changes state of machine. 24951da177e4SLinus Torvalds * 24961da177e4SLinus Torvalds * It does _not_ decide what to send, it is made in function 24971da177e4SLinus Torvalds * tcp_xmit_retransmit_queue(). 24981da177e4SLinus Torvalds */ 2499056834d9SIlpo Järvinen static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) 25001da177e4SLinus Torvalds { 25016687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 25021da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 25032e605294SIlpo Järvinen int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 25042e605294SIlpo Järvinen int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 250585cc391cSIlpo Järvinen (tcp_fackets_out(tp) > tp->reordering)); 250685cc391cSIlpo Järvinen int fast_rexmit = 0; 25071da177e4SLinus Torvalds 25083ccd3130SIlpo Järvinen if (WARN_ON(!tp->packets_out && tp->sacked_out)) 25091da177e4SLinus Torvalds tp->sacked_out = 0; 251091fed7a1SIlpo Järvinen if (WARN_ON(!tp->sacked_out && tp->fackets_out)) 25111da177e4SLinus Torvalds tp->fackets_out = 0; 25121da177e4SLinus Torvalds 25131da177e4SLinus Torvalds /* Now state machine starts. 25141da177e4SLinus Torvalds * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 25151da177e4SLinus Torvalds if (flag & FLAG_ECE) 25161da177e4SLinus Torvalds tp->prior_ssthresh = 0; 25171da177e4SLinus Torvalds 25181da177e4SLinus Torvalds /* B. In all the states check for reneging SACKs. */ 2519cadbd031SIlpo Järvinen if (tcp_check_sack_reneging(sk, flag)) 25201da177e4SLinus Torvalds return; 25211da177e4SLinus Torvalds 25221da177e4SLinus Torvalds /* C. Process data loss notification, provided it is valid. 
*/ 252385cc391cSIlpo Järvinen if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && 25241da177e4SLinus Torvalds before(tp->snd_una, tp->high_seq) && 25256687e988SArnaldo Carvalho de Melo icsk->icsk_ca_state != TCP_CA_Open && 25261da177e4SLinus Torvalds tp->fackets_out > tp->reordering) { 252785cc391cSIlpo Järvinen tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0); 25281da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); 25291da177e4SLinus Torvalds } 25301da177e4SLinus Torvalds 2531005903bcSIlpo Järvinen /* D. Check consistency of the current state. */ 2532005903bcSIlpo Järvinen tcp_verify_left_out(tp); 25331da177e4SLinus Torvalds 25341da177e4SLinus Torvalds /* E. Check state exit conditions. State can be terminated 25351da177e4SLinus Torvalds * when high_seq is ACKed. */ 25366687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state == TCP_CA_Open) { 25371da177e4SLinus Torvalds BUG_TRAP(tp->retrans_out == 0); 25381da177e4SLinus Torvalds tp->retrans_stamp = 0; 25391da177e4SLinus Torvalds } else if (!before(tp->snd_una, tp->high_seq)) { 25406687e988SArnaldo Carvalho de Melo switch (icsk->icsk_ca_state) { 25411da177e4SLinus Torvalds case TCP_CA_Loss: 25426687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits = 0; 25439e412ba7SIlpo Järvinen if (tcp_try_undo_recovery(sk)) 25441da177e4SLinus Torvalds return; 25451da177e4SLinus Torvalds break; 25461da177e4SLinus Torvalds 25471da177e4SLinus Torvalds case TCP_CA_CWR: 25481da177e4SLinus Torvalds /* CWR is to be held something *above* high_seq 25491da177e4SLinus Torvalds * is ACKed for CWR bit to reach receiver. */ 25501da177e4SLinus Torvalds if (tp->snd_una != tp->high_seq) { 25516687e988SArnaldo Carvalho de Melo tcp_complete_cwr(sk); 25526687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 25531da177e4SLinus Torvalds } 25541da177e4SLinus Torvalds break; 25551da177e4SLinus Torvalds 25561da177e4SLinus Torvalds case TCP_CA_Disorder: 25579e412ba7SIlpo Järvinen tcp_try_undo_dsack(sk); 25581da177e4SLinus Torvalds if (!tp->undo_marker || 25591da177e4SLinus Torvalds /* For SACK case do not Open to allow to undo 25601da177e4SLinus Torvalds * catching for all duplicate ACKs. */ 2561e60402d0SIlpo Järvinen tcp_is_reno(tp) || tp->snd_una != tp->high_seq) { 25621da177e4SLinus Torvalds tp->undo_marker = 0; 25636687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 25641da177e4SLinus Torvalds } 25651da177e4SLinus Torvalds break; 25661da177e4SLinus Torvalds 25671da177e4SLinus Torvalds case TCP_CA_Recovery: 2568e60402d0SIlpo Järvinen if (tcp_is_reno(tp)) 25691da177e4SLinus Torvalds tcp_reset_reno_sack(tp); 25709e412ba7SIlpo Järvinen if (tcp_try_undo_recovery(sk)) 25711da177e4SLinus Torvalds return; 25726687e988SArnaldo Carvalho de Melo tcp_complete_cwr(sk); 25731da177e4SLinus Torvalds break; 25741da177e4SLinus Torvalds } 25751da177e4SLinus Torvalds } 25761da177e4SLinus Torvalds 25771da177e4SLinus Torvalds /* F. Process state. 
*/ 25786687e988SArnaldo Carvalho de Melo switch (icsk->icsk_ca_state) { 25791da177e4SLinus Torvalds case TCP_CA_Recovery: 25802e605294SIlpo Järvinen if (!(flag & FLAG_SND_UNA_ADVANCED)) { 2581e60402d0SIlpo Järvinen if (tcp_is_reno(tp) && is_dupack) 25826687e988SArnaldo Carvalho de Melo tcp_add_reno_sack(sk); 25831b6d427bSIlpo Järvinen } else 25841b6d427bSIlpo Järvinen do_lost = tcp_try_undo_partial(sk, pkts_acked); 25851da177e4SLinus Torvalds break; 25861da177e4SLinus Torvalds case TCP_CA_Loss: 25871da177e4SLinus Torvalds if (flag & FLAG_DATA_ACKED) 25886687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits = 0; 25899e412ba7SIlpo Järvinen if (!tcp_try_undo_loss(sk)) { 25901da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 25911da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 25921da177e4SLinus Torvalds return; 25931da177e4SLinus Torvalds } 25946687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 25951da177e4SLinus Torvalds return; 25961da177e4SLinus Torvalds /* Loss is undone; fall through to processing in Open state. */ 25971da177e4SLinus Torvalds default: 2598e60402d0SIlpo Järvinen if (tcp_is_reno(tp)) { 25992e605294SIlpo Järvinen if (flag & FLAG_SND_UNA_ADVANCED) 26001da177e4SLinus Torvalds tcp_reset_reno_sack(tp); 26011da177e4SLinus Torvalds if (is_dupack) 26026687e988SArnaldo Carvalho de Melo tcp_add_reno_sack(sk); 26031da177e4SLinus Torvalds } 26041da177e4SLinus Torvalds 26056687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state == TCP_CA_Disorder) 26069e412ba7SIlpo Järvinen tcp_try_undo_dsack(sk); 26071da177e4SLinus Torvalds 26089e412ba7SIlpo Järvinen if (!tcp_time_to_recover(sk)) { 26099e412ba7SIlpo Järvinen tcp_try_to_open(sk, flag); 26101da177e4SLinus Torvalds return; 26111da177e4SLinus Torvalds } 26121da177e4SLinus Torvalds 26135d424d5aSJohn Heffner /* MTU probe failure: don't reduce cwnd */ 26145d424d5aSJohn Heffner if (icsk->icsk_ca_state < TCP_CA_CWR && 26155d424d5aSJohn Heffner icsk->icsk_mtup.probe_size && 26160e7b1368SJohn Heffner tp->snd_una == tp->mtu_probe.probe_seq_start) { 26175d424d5aSJohn Heffner tcp_mtup_probe_failed(sk); 26185d424d5aSJohn Heffner /* Restores the reduction we did in tcp_mtup_probe() */ 26195d424d5aSJohn Heffner tp->snd_cwnd++; 26205d424d5aSJohn Heffner tcp_simple_retransmit(sk); 26215d424d5aSJohn Heffner return; 26225d424d5aSJohn Heffner } 26235d424d5aSJohn Heffner 26241da177e4SLinus Torvalds /* Otherwise enter Recovery state */ 26251da177e4SLinus Torvalds 2626e60402d0SIlpo Järvinen if (tcp_is_reno(tp)) 26271da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); 26281da177e4SLinus Torvalds else 26291da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); 26301da177e4SLinus Torvalds 26311da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 26321da177e4SLinus Torvalds tp->prior_ssthresh = 0; 26331da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 26341da177e4SLinus Torvalds tp->undo_retrans = tp->retrans_out; 26351da177e4SLinus Torvalds 26366687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state < TCP_CA_CWR) { 26371da177e4SLinus Torvalds if (!(flag & FLAG_ECE)) 26386687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 26396687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 26401da177e4SLinus Torvalds TCP_ECN_queue_cwr(tp); 26411da177e4SLinus Torvalds } 26421da177e4SLinus Torvalds 26439772efb9SStephen Hemminger tp->bytes_acked = 0; 26441da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 26456687e988SArnaldo Carvalho de Melo 
tcp_set_ca_state(sk, TCP_CA_Recovery); 264685cc391cSIlpo Järvinen fast_rexmit = 1; 26471da177e4SLinus Torvalds } 26481da177e4SLinus Torvalds 264985cc391cSIlpo Järvinen if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 265085cc391cSIlpo Järvinen tcp_update_scoreboard(sk, fast_rexmit); 26511e757f99SIlpo Järvinen tcp_cwnd_down(sk, flag); 26521da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 26531da177e4SLinus Torvalds } 26541da177e4SLinus Torvalds 26551da177e4SLinus Torvalds /* Read draft-ietf-tcplw-high-performance before mucking 2656caa20d9aSStephen Hemminger * with this code. (Supersedes RFC1323) 26571da177e4SLinus Torvalds */ 26582d2abbabSStephen Hemminger static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 26591da177e4SLinus Torvalds { 26601da177e4SLinus Torvalds /* RTTM Rule: A TSecr value received in a segment is used to 26611da177e4SLinus Torvalds * update the averaged RTT measurement only if the segment 26621da177e4SLinus Torvalds * acknowledges some new data, i.e., only if it advances the 26631da177e4SLinus Torvalds * left edge of the send window. 26641da177e4SLinus Torvalds * 26651da177e4SLinus Torvalds * See draft-ietf-tcplw-high-performance-00, section 3.3. 26661da177e4SLinus Torvalds * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 26671da177e4SLinus Torvalds * 26681da177e4SLinus Torvalds * Changed: reset backoff as soon as we see the first valid sample. 2669caa20d9aSStephen Hemminger * If we do not, we get strongly overestimated rto. With timestamps 26701da177e4SLinus Torvalds * samples are accepted even from very old segments: f.e., when rtt=1 26711da177e4SLinus Torvalds * increases to 8, we retransmit 5 times and after 8 seconds delayed 26721da177e4SLinus Torvalds * answer arrives rto becomes 120 seconds! If at least one of segments 26731da177e4SLinus Torvalds * in window is lost... Voila. --ANK (010210) 26741da177e4SLinus Torvalds */ 2675463c84b9SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 2676463c84b9SArnaldo Carvalho de Melo const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 26772d2abbabSStephen Hemminger tcp_rtt_estimator(sk, seq_rtt); 2678463c84b9SArnaldo Carvalho de Melo tcp_set_rto(sk); 2679463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_backoff = 0; 2680463c84b9SArnaldo Carvalho de Melo tcp_bound_rto(sk); 26811da177e4SLinus Torvalds } 26821da177e4SLinus Torvalds 26832d2abbabSStephen Hemminger static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 26841da177e4SLinus Torvalds { 26851da177e4SLinus Torvalds /* We don't have a timestamp. Can only use 26861da177e4SLinus Torvalds * packets that are not retransmitted to determine 26871da177e4SLinus Torvalds * rtt estimates. Also, we must not reset the 26881da177e4SLinus Torvalds * backoff for rto until we get a non-retransmitted 26891da177e4SLinus Torvalds * packet. This allows us to deal with a situation 26901da177e4SLinus Torvalds * where the network delay has increased suddenly. 26911da177e4SLinus Torvalds * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 
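 *
 * F.e. if a segment is first sent at t=0, retransmitted at t=3 and an ACK
 * covering it arrives at t=4, the sample could mean an RTT of 4 (ACK for
 * the original) or of 1 (ACK for the retransmission); since we cannot tell
 * which, the sample is simply skipped below when FLAG_RETRANS_DATA_ACKED
 * is set.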
26921da177e4SLinus Torvalds */ 26931da177e4SLinus Torvalds 26941da177e4SLinus Torvalds if (flag & FLAG_RETRANS_DATA_ACKED) 26951da177e4SLinus Torvalds return; 26961da177e4SLinus Torvalds 26972d2abbabSStephen Hemminger tcp_rtt_estimator(sk, seq_rtt); 2698463c84b9SArnaldo Carvalho de Melo tcp_set_rto(sk); 2699463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_backoff = 0; 2700463c84b9SArnaldo Carvalho de Melo tcp_bound_rto(sk); 27011da177e4SLinus Torvalds } 27021da177e4SLinus Torvalds 2703463c84b9SArnaldo Carvalho de Melo static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 27042d2abbabSStephen Hemminger const s32 seq_rtt) 27051da177e4SLinus Torvalds { 2706463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 27071da177e4SLinus Torvalds /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 27081da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 27092d2abbabSStephen Hemminger tcp_ack_saw_tstamp(sk, flag); 27101da177e4SLinus Torvalds else if (seq_rtt >= 0) 27112d2abbabSStephen Hemminger tcp_ack_no_tstamp(sk, seq_rtt, flag); 27121da177e4SLinus Torvalds } 27131da177e4SLinus Torvalds 2714c3a05c60SIlpo Järvinen static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 27151da177e4SLinus Torvalds { 27166687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 2717c3a05c60SIlpo Järvinen icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); 27186687e988SArnaldo Carvalho de Melo tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 27191da177e4SLinus Torvalds } 27201da177e4SLinus Torvalds 27211da177e4SLinus Torvalds /* Restart timer after forward progress on connection. 27221da177e4SLinus Torvalds * RFC2988 recommends to restart timer to now+rto. 27231da177e4SLinus Torvalds */ 27246728e7dcSIlpo Järvinen static void tcp_rearm_rto(struct sock *sk) 27251da177e4SLinus Torvalds { 27269e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 27279e412ba7SIlpo Järvinen 27281da177e4SLinus Torvalds if (!tp->packets_out) { 2729463c84b9SArnaldo Carvalho de Melo inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 27301da177e4SLinus Torvalds } else { 2731056834d9SIlpo Järvinen inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2732056834d9SIlpo Järvinen inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 27331da177e4SLinus Torvalds } 27341da177e4SLinus Torvalds } 27351da177e4SLinus Torvalds 27367c46a03eSIlpo Järvinen /* If we get here, the whole TSO packet has not been acked. 
*/ 273713fcf850SIlpo Järvinen static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 27381da177e4SLinus Torvalds { 27391da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 27407c46a03eSIlpo Järvinen u32 packets_acked; 27411da177e4SLinus Torvalds 27427c46a03eSIlpo Järvinen BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 27431da177e4SLinus Torvalds 27441da177e4SLinus Torvalds packets_acked = tcp_skb_pcount(skb); 27457c46a03eSIlpo Järvinen if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 27461da177e4SLinus Torvalds return 0; 27471da177e4SLinus Torvalds packets_acked -= tcp_skb_pcount(skb); 27481da177e4SLinus Torvalds 27491da177e4SLinus Torvalds if (packets_acked) { 27501da177e4SLinus Torvalds BUG_ON(tcp_skb_pcount(skb) == 0); 27517c46a03eSIlpo Järvinen BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 27521da177e4SLinus Torvalds } 27531da177e4SLinus Torvalds 275413fcf850SIlpo Järvinen return packets_acked; 27551da177e4SLinus Torvalds } 27561da177e4SLinus Torvalds 27577c46a03eSIlpo Järvinen /* Remove acknowledged frames from the retransmission queue. If our packet 27587c46a03eSIlpo Järvinen * is before the ack sequence we can discard it as it's confirmed to have 27597c46a03eSIlpo Järvinen * arrived at the other end. 27607c46a03eSIlpo Järvinen */ 2761c776ee01SIlpo Järvinen static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) 27621da177e4SLinus Torvalds { 27631da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 27642d2abbabSStephen Hemminger const struct inet_connection_sock *icsk = inet_csk(sk); 27651da177e4SLinus Torvalds struct sk_buff *skb; 27667c46a03eSIlpo Järvinen u32 now = tcp_time_stamp; 276713fcf850SIlpo Järvinen int fully_acked = 1; 27687c46a03eSIlpo Järvinen int flag = 0; 276972018835SIlpo Järvinen u32 pkts_acked = 0; 2770c7caf8d3SIlpo Järvinen u32 reord = tp->packets_out; 27717c46a03eSIlpo Järvinen s32 seq_rtt = -1; 27722072c228SGavin McCullagh s32 ca_seq_rtt = -1; 2773b9ce204fSIlpo Järvinen ktime_t last_ackt = net_invalid_timestamp(); 27741da177e4SLinus Torvalds 27757c46a03eSIlpo Järvinen while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 27761da177e4SLinus Torvalds struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 277713fcf850SIlpo Järvinen u32 end_seq; 277872018835SIlpo Järvinen u32 acked_pcount; 27797c46a03eSIlpo Järvinen u8 sacked = scb->sacked; 27801da177e4SLinus Torvalds 27812072c228SGavin McCullagh /* Determine how many packets and what bytes were acked, tso and else */ 27821da177e4SLinus Torvalds if (after(scb->end_seq, tp->snd_una)) { 278313fcf850SIlpo Järvinen if (tcp_skb_pcount(skb) == 1 || 278413fcf850SIlpo Järvinen !after(tp->snd_una, scb->seq)) 27851da177e4SLinus Torvalds break; 278613fcf850SIlpo Järvinen 278772018835SIlpo Järvinen acked_pcount = tcp_tso_acked(sk, skb); 278872018835SIlpo Järvinen if (!acked_pcount) 278913fcf850SIlpo Järvinen break; 279013fcf850SIlpo Järvinen 279113fcf850SIlpo Järvinen fully_acked = 0; 279213fcf850SIlpo Järvinen end_seq = tp->snd_una; 279313fcf850SIlpo Järvinen } else { 279472018835SIlpo Järvinen acked_pcount = tcp_skb_pcount(skb); 279513fcf850SIlpo Järvinen end_seq = scb->end_seq; 27961da177e4SLinus Torvalds } 27971da177e4SLinus Torvalds 27985d424d5aSJohn Heffner /* MTU probing checks */ 27997c46a03eSIlpo Järvinen if (fully_acked && icsk->icsk_mtup.probe_size && 28007c46a03eSIlpo Järvinen !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) { 28015d424d5aSJohn Heffner tcp_mtup_probe_success(sk, skb); 28025d424d5aSJohn Heffner } 28035d424d5aSJohn Heffner 
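		/* At this point acked_pcount holds how many packets of this
		 * skb the cumulative ACK covers: the full pcount when the
		 * skb was acked as a whole, or, for a partially acked TSO
		 * skb, only the head trimmed off by tcp_tso_acked() (in
		 * which case fully_acked is 0 and the skb stays queued).
		 * The bookkeeping below reduces the retrans/sacked/lost
		 * counters and packets_out by that amount.
		 */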
28041da177e4SLinus Torvalds if (sacked & TCPCB_RETRANS) { 28051da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_RETRANS) 280672018835SIlpo Järvinen tp->retrans_out -= acked_pcount; 28077c46a03eSIlpo Järvinen flag |= FLAG_RETRANS_DATA_ACKED; 28082072c228SGavin McCullagh ca_seq_rtt = -1; 28091da177e4SLinus Torvalds seq_rtt = -1; 2810056834d9SIlpo Järvinen if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) 2811009a2e3eSIlpo Järvinen flag |= FLAG_NONHEAD_RETRANS_ACKED; 2812c7caf8d3SIlpo Järvinen } else { 28132072c228SGavin McCullagh ca_seq_rtt = now - scb->when; 2814164891aaSStephen Hemminger last_ackt = skb->tstamp; 28152072c228SGavin McCullagh if (seq_rtt < 0) { 28162072c228SGavin McCullagh seq_rtt = ca_seq_rtt; 2817a61bbcf2SPatrick McHardy } 2818c7caf8d3SIlpo Järvinen if (!(sacked & TCPCB_SACKED_ACKED)) 281972018835SIlpo Järvinen reord = min(pkts_acked, reord); 2820c7caf8d3SIlpo Järvinen } 28217c46a03eSIlpo Järvinen 28221da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_ACKED) 282372018835SIlpo Järvinen tp->sacked_out -= acked_pcount; 28241da177e4SLinus Torvalds if (sacked & TCPCB_LOST) 282572018835SIlpo Järvinen tp->lost_out -= acked_pcount; 28267c46a03eSIlpo Järvinen 28274828e7f4SIlpo Järvinen if (unlikely(tp->urg_mode && !before(end_seq, tp->snd_up))) 28281da177e4SLinus Torvalds tp->urg_mode = 0; 282989d478f7SIlpo Järvinen 283072018835SIlpo Järvinen tp->packets_out -= acked_pcount; 283172018835SIlpo Järvinen pkts_acked += acked_pcount; 283213fcf850SIlpo Järvinen 2833009a2e3eSIlpo Järvinen /* Initial outgoing SYN's get put onto the write_queue 2834009a2e3eSIlpo Järvinen * just like anything else we transmit. It is not 2835009a2e3eSIlpo Järvinen * true data, and if we misinform our callers that 2836009a2e3eSIlpo Järvinen * this ACK acks real data, we will erroneously exit 2837009a2e3eSIlpo Järvinen * connection startup slow start one packet too 2838009a2e3eSIlpo Järvinen * quickly. This is severely frowned upon behavior. 2839009a2e3eSIlpo Järvinen */ 2840009a2e3eSIlpo Järvinen if (!(scb->flags & TCPCB_FLAG_SYN)) { 2841009a2e3eSIlpo Järvinen flag |= FLAG_DATA_ACKED; 2842009a2e3eSIlpo Järvinen } else { 2843009a2e3eSIlpo Järvinen flag |= FLAG_SYN_ACKED; 2844009a2e3eSIlpo Järvinen tp->retrans_stamp = 0; 2845009a2e3eSIlpo Järvinen } 2846009a2e3eSIlpo Järvinen 284713fcf850SIlpo Järvinen if (!fully_acked) 284813fcf850SIlpo Järvinen break; 284913fcf850SIlpo Järvinen 2850fe067e8aSDavid S. Miller tcp_unlink_write_queue(skb, sk); 28513ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 28525af4ec23SIlpo Järvinen tcp_clear_all_retrans_hints(tp); 28531da177e4SLinus Torvalds } 28541da177e4SLinus Torvalds 2855cadbd031SIlpo Järvinen if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2856cadbd031SIlpo Järvinen flag |= FLAG_SACK_RENEGING; 2857cadbd031SIlpo Järvinen 28587c46a03eSIlpo Järvinen if (flag & FLAG_ACKED) { 2859164891aaSStephen Hemminger const struct tcp_congestion_ops *ca_ops 2860164891aaSStephen Hemminger = inet_csk(sk)->icsk_ca_ops; 2861164891aaSStephen Hemminger 28627c46a03eSIlpo Järvinen tcp_ack_update_rtt(sk, flag, seq_rtt); 28636728e7dcSIlpo Järvinen tcp_rearm_rto(sk); 2864317a76f9SStephen Hemminger 2865c7caf8d3SIlpo Järvinen if (tcp_is_reno(tp)) { 2866c7caf8d3SIlpo Järvinen tcp_remove_reno_sacks(sk, pkts_acked); 2867c7caf8d3SIlpo Järvinen } else { 2868c7caf8d3SIlpo Järvinen /* Non-retransmitted hole got filled? 
That's reordering */ 2869c7caf8d3SIlpo Järvinen if (reord < prior_fackets) 2870c7caf8d3SIlpo Järvinen tcp_update_reordering(sk, tp->fackets_out - reord, 0); 2871c7caf8d3SIlpo Järvinen } 2872c7caf8d3SIlpo Järvinen 287391fed7a1SIlpo Järvinen tp->fackets_out -= min(pkts_acked, tp->fackets_out); 287468f8353bSIlpo Järvinen 287530cfd0baSStephen Hemminger if (ca_ops->pkts_acked) { 287630cfd0baSStephen Hemminger s32 rtt_us = -1; 2877b9ce204fSIlpo Järvinen 287830cfd0baSStephen Hemminger /* Is the ACK triggering packet unambiguous? */ 28797c46a03eSIlpo Järvinen if (!(flag & FLAG_RETRANS_DATA_ACKED)) { 288030cfd0baSStephen Hemminger /* High resolution needed and available? */ 288130cfd0baSStephen Hemminger if (ca_ops->flags & TCP_CONG_RTT_STAMP && 288230cfd0baSStephen Hemminger !ktime_equal(last_ackt, 288330cfd0baSStephen Hemminger net_invalid_timestamp())) 288430cfd0baSStephen Hemminger rtt_us = ktime_us_delta(ktime_get_real(), 288530cfd0baSStephen Hemminger last_ackt); 28862072c228SGavin McCullagh else if (ca_seq_rtt > 0) 28872072c228SGavin McCullagh rtt_us = jiffies_to_usecs(ca_seq_rtt); 288830cfd0baSStephen Hemminger } 288930cfd0baSStephen Hemminger 289030cfd0baSStephen Hemminger ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 289130cfd0baSStephen Hemminger } 28921da177e4SLinus Torvalds } 28931da177e4SLinus Torvalds 28941da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 28951da177e4SLinus Torvalds BUG_TRAP((int)tp->sacked_out >= 0); 28961da177e4SLinus Torvalds BUG_TRAP((int)tp->lost_out >= 0); 28971da177e4SLinus Torvalds BUG_TRAP((int)tp->retrans_out >= 0); 2898e60402d0SIlpo Järvinen if (!tp->packets_out && tcp_is_sack(tp)) { 2899cfcabdccSStephen Hemminger icsk = inet_csk(sk); 29001da177e4SLinus Torvalds if (tp->lost_out) { 29011da177e4SLinus Torvalds printk(KERN_DEBUG "Leak l=%u %d\n", 29026687e988SArnaldo Carvalho de Melo tp->lost_out, icsk->icsk_ca_state); 29031da177e4SLinus Torvalds tp->lost_out = 0; 29041da177e4SLinus Torvalds } 29051da177e4SLinus Torvalds if (tp->sacked_out) { 29061da177e4SLinus Torvalds printk(KERN_DEBUG "Leak s=%u %d\n", 29076687e988SArnaldo Carvalho de Melo tp->sacked_out, icsk->icsk_ca_state); 29081da177e4SLinus Torvalds tp->sacked_out = 0; 29091da177e4SLinus Torvalds } 29101da177e4SLinus Torvalds if (tp->retrans_out) { 29111da177e4SLinus Torvalds printk(KERN_DEBUG "Leak r=%u %d\n", 29126687e988SArnaldo Carvalho de Melo tp->retrans_out, icsk->icsk_ca_state); 29131da177e4SLinus Torvalds tp->retrans_out = 0; 29141da177e4SLinus Torvalds } 29151da177e4SLinus Torvalds } 29161da177e4SLinus Torvalds #endif 29177c46a03eSIlpo Järvinen return flag; 29181da177e4SLinus Torvalds } 29191da177e4SLinus Torvalds 29201da177e4SLinus Torvalds static void tcp_ack_probe(struct sock *sk) 29211da177e4SLinus Torvalds { 2922463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 2923463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 29241da177e4SLinus Torvalds 29251da177e4SLinus Torvalds /* Was it a usable window open? */ 29261da177e4SLinus Torvalds 292790840defSIlpo Järvinen if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { 2928463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 2929463c84b9SArnaldo Carvalho de Melo inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 29301da177e4SLinus Torvalds /* Socket must be waked up by subsequent tcp_data_snd_check(). 29311da177e4SLinus Torvalds * This function is not for random using! 
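 * (If the window is still closed, the else branch below re-arms the
 * zero window probe timer instead, with exponential backoff capped at
 * TCP_RTO_MAX.)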
29321da177e4SLinus Torvalds */ 29331da177e4SLinus Torvalds } else { 2934463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 29353f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 29363f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 29371da177e4SLinus Torvalds } 29381da177e4SLinus Torvalds } 29391da177e4SLinus Torvalds 29406687e988SArnaldo Carvalho de Melo static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 29411da177e4SLinus Torvalds { 29421da177e4SLinus Torvalds return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 29436687e988SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ca_state != TCP_CA_Open); 29441da177e4SLinus Torvalds } 29451da177e4SLinus Torvalds 29466687e988SArnaldo Carvalho de Melo static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 29471da177e4SLinus Torvalds { 29486687e988SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 29491da177e4SLinus Torvalds return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 29506687e988SArnaldo Carvalho de Melo !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 29511da177e4SLinus Torvalds } 29521da177e4SLinus Torvalds 29531da177e4SLinus Torvalds /* Check that window update is acceptable. 29541da177e4SLinus Torvalds * The function assumes that snd_una<=ack<=snd_next. 29551da177e4SLinus Torvalds */ 2956056834d9SIlpo Järvinen static inline int tcp_may_update_window(const struct tcp_sock *tp, 2957056834d9SIlpo Järvinen const u32 ack, const u32 ack_seq, 2958056834d9SIlpo Järvinen const u32 nwin) 29591da177e4SLinus Torvalds { 29601da177e4SLinus Torvalds return (after(ack, tp->snd_una) || 29611da177e4SLinus Torvalds after(ack_seq, tp->snd_wl1) || 29621da177e4SLinus Torvalds (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd)); 29631da177e4SLinus Torvalds } 29641da177e4SLinus Torvalds 29651da177e4SLinus Torvalds /* Update our send window. 29661da177e4SLinus Torvalds * 29671da177e4SLinus Torvalds * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 29681da177e4SLinus Torvalds * and in FreeBSD. NetBSD's one is even worse.) is wrong. 29691da177e4SLinus Torvalds */ 29709e412ba7SIlpo Järvinen static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, 29719e412ba7SIlpo Järvinen u32 ack_seq) 29721da177e4SLinus Torvalds { 29739e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 29741da177e4SLinus Torvalds int flag = 0; 2975aa8223c7SArnaldo Carvalho de Melo u32 nwin = ntohs(tcp_hdr(skb)->window); 29761da177e4SLinus Torvalds 2977aa8223c7SArnaldo Carvalho de Melo if (likely(!tcp_hdr(skb)->syn)) 29781da177e4SLinus Torvalds nwin <<= tp->rx_opt.snd_wscale; 29791da177e4SLinus Torvalds 29801da177e4SLinus Torvalds if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 29811da177e4SLinus Torvalds flag |= FLAG_WIN_UPDATE; 29821da177e4SLinus Torvalds tcp_update_wl(tp, ack, ack_seq); 29831da177e4SLinus Torvalds 29841da177e4SLinus Torvalds if (tp->snd_wnd != nwin) { 29851da177e4SLinus Torvalds tp->snd_wnd = nwin; 29861da177e4SLinus Torvalds 29871da177e4SLinus Torvalds /* Note, it is the only place, where 29881da177e4SLinus Torvalds * fast path is recovered for sending TCP. 
29891da177e4SLinus Torvalds */ 29902ad41065SHerbert Xu tp->pred_flags = 0; 29919e412ba7SIlpo Järvinen tcp_fast_path_check(sk); 29921da177e4SLinus Torvalds 29931da177e4SLinus Torvalds if (nwin > tp->max_window) { 29941da177e4SLinus Torvalds tp->max_window = nwin; 2995d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 29961da177e4SLinus Torvalds } 29971da177e4SLinus Torvalds } 29981da177e4SLinus Torvalds } 29991da177e4SLinus Torvalds 30001da177e4SLinus Torvalds tp->snd_una = ack; 30011da177e4SLinus Torvalds 30021da177e4SLinus Torvalds return flag; 30031da177e4SLinus Torvalds } 30041da177e4SLinus Torvalds 30059ead9a1dSIlpo Järvinen /* A very conservative spurious RTO response algorithm: reduce cwnd and 30069ead9a1dSIlpo Järvinen * continue in congestion avoidance. 30079ead9a1dSIlpo Järvinen */ 30089ead9a1dSIlpo Järvinen static void tcp_conservative_spur_to_response(struct tcp_sock *tp) 30099ead9a1dSIlpo Järvinen { 30109ead9a1dSIlpo Järvinen tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3011aa8b6a7aSIlpo Järvinen tp->snd_cwnd_cnt = 0; 301216e90681SIlpo Järvinen tp->bytes_acked = 0; 301346323655SIlpo Järvinen TCP_ECN_queue_cwr(tp); 30149ead9a1dSIlpo Järvinen tcp_moderate_cwnd(tp); 30159ead9a1dSIlpo Järvinen } 30169ead9a1dSIlpo Järvinen 30173cfe3baaSIlpo Järvinen /* A conservative spurious RTO response algorithm: reduce cwnd using 30183cfe3baaSIlpo Järvinen * rate halving and continue in congestion avoidance. 30193cfe3baaSIlpo Järvinen */ 30203cfe3baaSIlpo Järvinen static void tcp_ratehalving_spur_to_response(struct sock *sk) 30213cfe3baaSIlpo Järvinen { 30223cfe3baaSIlpo Järvinen tcp_enter_cwr(sk, 0); 30233cfe3baaSIlpo Järvinen } 30243cfe3baaSIlpo Järvinen 3025e317f6f6SIlpo Järvinen static void tcp_undo_spur_to_response(struct sock *sk, int flag) 30263cfe3baaSIlpo Järvinen { 3027e317f6f6SIlpo Järvinen if (flag & FLAG_ECE) 3028e317f6f6SIlpo Järvinen tcp_ratehalving_spur_to_response(sk); 3029e317f6f6SIlpo Järvinen else 30303cfe3baaSIlpo Järvinen tcp_undo_cwr(sk, 1); 30313cfe3baaSIlpo Järvinen } 30323cfe3baaSIlpo Järvinen 303330935cf4SIlpo Järvinen /* F-RTO spurious RTO detection algorithm (RFC4138) 303430935cf4SIlpo Järvinen * 30356408d206SIlpo Järvinen * F-RTO affects during two new ACKs following RTO (well, almost, see inline 30366408d206SIlpo Järvinen * comments). State (ACK number) is kept in frto_counter. When ACK advances 30376408d206SIlpo Järvinen * window (but not to or beyond highest sequence sent before RTO): 303830935cf4SIlpo Järvinen * On First ACK, send two new segments out. 303930935cf4SIlpo Järvinen * On Second ACK, RTO was likely spurious. Do spurious response (response 304030935cf4SIlpo Järvinen * algorithm is not part of the F-RTO detection algorithm 304130935cf4SIlpo Järvinen * given in RFC4138 but can be selected separately). 304230935cf4SIlpo Järvinen * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss 3043d551e454SIlpo Järvinen * and TCP falls back to conventional RTO recovery. F-RTO allows overriding 3044d551e454SIlpo Järvinen * of Nagle, this is done using frto_counter states 2 and 3, when a new data 3045d551e454SIlpo Järvinen * segment of any size sent during F-RTO, state 2 is upgraded to 3. 304630935cf4SIlpo Järvinen * 304730935cf4SIlpo Järvinen * Rationale: if the RTO was spurious, new ACKs should arrive from the 304830935cf4SIlpo Järvinen * original window even after we transmit two new data segments. 
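 *
 * F.e. (rough sketch of the common case): the RTO fires and the head is
 * retransmitted. The first ACK that advances the window is processed with
 * frto_counter == 1: cwnd is opened just enough to send two new segments
 * and frto_counter moves to 2. If the following ACK again advances the
 * window, it must have been triggered by an original transmission, so the
 * RTO is declared spurious and one of the response algorithms below is
 * applied; if it instead only duplicates or acks retransmitted data, we
 * fall back to conventional recovery via tcp_enter_frto_loss().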
304930935cf4SIlpo Järvinen * 30504dc2665eSIlpo Järvinen * SACK version: 30514dc2665eSIlpo Järvinen * on first step, wait until first cumulative ACK arrives, then move to 30524dc2665eSIlpo Järvinen * the second step. In second step, the next ACK decides. 30534dc2665eSIlpo Järvinen * 305430935cf4SIlpo Järvinen * F-RTO is implemented (mainly) in four functions: 305530935cf4SIlpo Järvinen * - tcp_use_frto() is used to determine if TCP can use F-RTO 305630935cf4SIlpo Järvinen * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used; it is 305730935cf4SIlpo Järvinen * called when tcp_use_frto() showed green light 305830935cf4SIlpo Järvinen * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm 305930935cf4SIlpo Järvinen * - tcp_enter_frto_loss() is called if there is not enough evidence 306030935cf4SIlpo Järvinen * to prove that the RTO is indeed spurious. It transfers the control 306130935cf4SIlpo Järvinen * from F-RTO to the conventional RTO recovery 306230935cf4SIlpo Järvinen */ 30632e605294SIlpo Järvinen static int tcp_process_frto(struct sock *sk, int flag) 30641da177e4SLinus Torvalds { 30651da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30661da177e4SLinus Torvalds 3067005903bcSIlpo Järvinen tcp_verify_left_out(tp); 30681da177e4SLinus Torvalds 30697487c48cSIlpo Järvinen /* Duplicate the behavior from Loss state (fastretrans_alert) */ 30707487c48cSIlpo Järvinen if (flag & FLAG_DATA_ACKED) 30717487c48cSIlpo Järvinen inet_csk(sk)->icsk_retransmits = 0; 30727487c48cSIlpo Järvinen 3073009a2e3eSIlpo Järvinen if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || 3074009a2e3eSIlpo Järvinen ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) 3075009a2e3eSIlpo Järvinen tp->undo_marker = 0; 3076009a2e3eSIlpo Järvinen 307795c4922bSIlpo Järvinen if (!before(tp->snd_una, tp->frto_highmark)) { 3078d551e454SIlpo Järvinen tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); 30797c9a4a5bSIlpo Järvinen return 1; 308095c4922bSIlpo Järvinen } 308195c4922bSIlpo Järvinen 3082e60402d0SIlpo Järvinen if (!IsSackFrto() || tcp_is_reno(tp)) { 30834dc2665eSIlpo Järvinen /* RFC4138 shortcoming in step 2; should also have case c): 30844dc2665eSIlpo Järvinen * ACK isn't duplicate nor advances window, e.g., opposite dir 30854dc2665eSIlpo Järvinen * data, winupdate 30866408d206SIlpo Järvinen */ 30872e605294SIlpo Järvinen if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) 30887c9a4a5bSIlpo Järvinen return 1; 30896408d206SIlpo Järvinen 309095c4922bSIlpo Järvinen if (!(flag & FLAG_DATA_ACKED)) { 30914dc2665eSIlpo Järvinen tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), 30924dc2665eSIlpo Järvinen flag); 30937c9a4a5bSIlpo Järvinen return 1; 30941da177e4SLinus Torvalds } 30954dc2665eSIlpo Järvinen } else { 30964dc2665eSIlpo Järvinen if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 30974dc2665eSIlpo Järvinen /* Prevent sending of new data.
*/ 30984dc2665eSIlpo Järvinen tp->snd_cwnd = min(tp->snd_cwnd, 30994dc2665eSIlpo Järvinen tcp_packets_in_flight(tp)); 31004dc2665eSIlpo Järvinen return 1; 31014dc2665eSIlpo Järvinen } 31024dc2665eSIlpo Järvinen 3103d551e454SIlpo Järvinen if ((tp->frto_counter >= 2) && 31044dc2665eSIlpo Järvinen (!(flag & FLAG_FORWARD_PROGRESS) || 3105056834d9SIlpo Järvinen ((flag & FLAG_DATA_SACKED) && 3106056834d9SIlpo Järvinen !(flag & FLAG_ONLY_ORIG_SACKED)))) { 31074dc2665eSIlpo Järvinen /* RFC4138 shortcoming (see comment above) */ 3108056834d9SIlpo Järvinen if (!(flag & FLAG_FORWARD_PROGRESS) && 3109056834d9SIlpo Järvinen (flag & FLAG_NOT_DUP)) 31104dc2665eSIlpo Järvinen return 1; 31114dc2665eSIlpo Järvinen 31124dc2665eSIlpo Järvinen tcp_enter_frto_loss(sk, 3, flag); 31134dc2665eSIlpo Järvinen return 1; 31144dc2665eSIlpo Järvinen } 31154dc2665eSIlpo Järvinen } 31161da177e4SLinus Torvalds 31171da177e4SLinus Torvalds if (tp->frto_counter == 1) { 31183e6f049eSIlpo Järvinen /* tcp_may_send_now needs to see updated state */ 31191da177e4SLinus Torvalds tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; 312094d0ea77SIlpo Järvinen tp->frto_counter = 2; 31213e6f049eSIlpo Järvinen 31223e6f049eSIlpo Järvinen if (!tcp_may_send_now(sk)) 31233e6f049eSIlpo Järvinen tcp_enter_frto_loss(sk, 2, flag); 31243e6f049eSIlpo Järvinen 31257c9a4a5bSIlpo Järvinen return 1; 3126d551e454SIlpo Järvinen } else { 31273cfe3baaSIlpo Järvinen switch (sysctl_tcp_frto_response) { 31283cfe3baaSIlpo Järvinen case 2: 3129e317f6f6SIlpo Järvinen tcp_undo_spur_to_response(sk, flag); 31303cfe3baaSIlpo Järvinen break; 31313cfe3baaSIlpo Järvinen case 1: 31329ead9a1dSIlpo Järvinen tcp_conservative_spur_to_response(tp); 31333cfe3baaSIlpo Järvinen break; 31343cfe3baaSIlpo Järvinen default: 31353cfe3baaSIlpo Järvinen tcp_ratehalving_spur_to_response(sk); 31363cfe3baaSIlpo Järvinen break; 31373ff50b79SStephen Hemminger } 313894d0ea77SIlpo Järvinen tp->frto_counter = 0; 3139009a2e3eSIlpo Järvinen tp->undo_marker = 0; 3140912d8f0bSIlpo Järvinen NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS); 31411da177e4SLinus Torvalds } 31427c9a4a5bSIlpo Järvinen return 0; 31431da177e4SLinus Torvalds } 31441da177e4SLinus Torvalds 31451da177e4SLinus Torvalds /* This routine deals with incoming acks, but not outgoing ones. */ 31461da177e4SLinus Torvalds static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 31471da177e4SLinus Torvalds { 31486687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 31491da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 31501da177e4SLinus Torvalds u32 prior_snd_una = tp->snd_una; 31511da177e4SLinus Torvalds u32 ack_seq = TCP_SKB_CB(skb)->seq; 31521da177e4SLinus Torvalds u32 ack = TCP_SKB_CB(skb)->ack_seq; 31531da177e4SLinus Torvalds u32 prior_in_flight; 3154c7caf8d3SIlpo Järvinen u32 prior_fackets; 31551da177e4SLinus Torvalds int prior_packets; 31567c9a4a5bSIlpo Järvinen int frto_cwnd = 0; 31571da177e4SLinus Torvalds 31581da177e4SLinus Torvalds /* If the ack is newer than sent or older than previous acks 31591da177e4SLinus Torvalds * then we can probably ignore it. 
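 * Note that after()/before() compare sequence numbers modulo 2^32,
 * so "newer" and "older" stay well defined across wrap-around.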
31601da177e4SLinus Torvalds */ 31611da177e4SLinus Torvalds if (after(ack, tp->snd_nxt)) 31621da177e4SLinus Torvalds goto uninteresting_ack; 31631da177e4SLinus Torvalds 31641da177e4SLinus Torvalds if (before(ack, prior_snd_una)) 31651da177e4SLinus Torvalds goto old_ack; 31661da177e4SLinus Torvalds 31672e605294SIlpo Järvinen if (after(ack, prior_snd_una)) 31682e605294SIlpo Järvinen flag |= FLAG_SND_UNA_ADVANCED; 31692e605294SIlpo Järvinen 31703fdf3f0cSDaikichi Osuga if (sysctl_tcp_abc) { 31713fdf3f0cSDaikichi Osuga if (icsk->icsk_ca_state < TCP_CA_CWR) 31729772efb9SStephen Hemminger tp->bytes_acked += ack - prior_snd_una; 31733fdf3f0cSDaikichi Osuga else if (icsk->icsk_ca_state == TCP_CA_Loss) 31743fdf3f0cSDaikichi Osuga /* we assume just one segment left network */ 3175056834d9SIlpo Järvinen tp->bytes_acked += min(ack - prior_snd_una, 3176056834d9SIlpo Järvinen tp->mss_cache); 31773fdf3f0cSDaikichi Osuga } 31789772efb9SStephen Hemminger 3179c7caf8d3SIlpo Järvinen prior_fackets = tp->fackets_out; 318052d34081SIlpo Järvinen prior_in_flight = tcp_packets_in_flight(tp); 3181c7caf8d3SIlpo Järvinen 31821da177e4SLinus Torvalds if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 31831da177e4SLinus Torvalds /* Window is constant, pure forward advance. 31841da177e4SLinus Torvalds * No more checks are required. 31851da177e4SLinus Torvalds * Note, we use the fact that SND.UNA>=SND.WL2. 31861da177e4SLinus Torvalds */ 31871da177e4SLinus Torvalds tcp_update_wl(tp, ack, ack_seq); 31881da177e4SLinus Torvalds tp->snd_una = ack; 31891da177e4SLinus Torvalds flag |= FLAG_WIN_UPDATE; 31901da177e4SLinus Torvalds 31916687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_FAST_ACK); 3192317a76f9SStephen Hemminger 31931da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); 31941da177e4SLinus Torvalds } else { 31951da177e4SLinus Torvalds if (ack_seq != TCP_SKB_CB(skb)->end_seq) 31961da177e4SLinus Torvalds flag |= FLAG_DATA; 31971da177e4SLinus Torvalds else 31981da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); 31991da177e4SLinus Torvalds 32009e412ba7SIlpo Järvinen flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 32011da177e4SLinus Torvalds 32021da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked) 32031da177e4SLinus Torvalds flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 32041da177e4SLinus Torvalds 3205aa8223c7SArnaldo Carvalho de Melo if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) 32061da177e4SLinus Torvalds flag |= FLAG_ECE; 32071da177e4SLinus Torvalds 32086687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 32091da177e4SLinus Torvalds } 32101da177e4SLinus Torvalds 32111da177e4SLinus Torvalds /* We passed data and got it acked, remove any soft error 32121da177e4SLinus Torvalds * log. Something worked... 32131da177e4SLinus Torvalds */ 32141da177e4SLinus Torvalds sk->sk_err_soft = 0; 32151da177e4SLinus Torvalds tp->rcv_tstamp = tcp_time_stamp; 32161da177e4SLinus Torvalds prior_packets = tp->packets_out; 32171da177e4SLinus Torvalds if (!prior_packets) 32181da177e4SLinus Torvalds goto no_queue; 32191da177e4SLinus Torvalds 32201da177e4SLinus Torvalds /* See if we can take anything off of the retransmit queue. 
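 * tcp_clean_rtx_queue() frees the skbs that are now fully acked,
 * updates the RTT estimate and returns additional FLAG_* bits
 * describing what was acknowledged.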
*/ 3221c776ee01SIlpo Järvinen flag |= tcp_clean_rtx_queue(sk, prior_fackets); 32221da177e4SLinus Torvalds 3223e1cd8f78SIlpo Järvinen if (tp->frto_counter) 3224e1cd8f78SIlpo Järvinen frto_cwnd = tcp_process_frto(sk, flag); 32253de96471SIlpo Järvinen /* Guarantee sacktag reordering detection against wrap-arounds */ 32263de96471SIlpo Järvinen if (before(tp->frto_highmark, tp->snd_una)) 32273de96471SIlpo Järvinen tp->frto_highmark = 0; 32281da177e4SLinus Torvalds 32296687e988SArnaldo Carvalho de Melo if (tcp_ack_is_dubious(sk, flag)) { 3230caa20d9aSStephen Hemminger /* Advance CWND, if state allows this. */ 32317c9a4a5bSIlpo Järvinen if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && 32327c9a4a5bSIlpo Järvinen tcp_may_raise_cwnd(sk, flag)) 3233c3a05c60SIlpo Järvinen tcp_cong_avoid(sk, ack, prior_in_flight); 3234056834d9SIlpo Järvinen tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, 3235056834d9SIlpo Järvinen flag); 32361da177e4SLinus Torvalds } else { 32377c9a4a5bSIlpo Järvinen if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3238c3a05c60SIlpo Järvinen tcp_cong_avoid(sk, ack, prior_in_flight); 32391da177e4SLinus Torvalds } 32401da177e4SLinus Torvalds 32411da177e4SLinus Torvalds if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 32421da177e4SLinus Torvalds dst_confirm(sk->sk_dst_cache); 32431da177e4SLinus Torvalds 32441da177e4SLinus Torvalds return 1; 32451da177e4SLinus Torvalds 32461da177e4SLinus Torvalds no_queue: 32476687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 32481da177e4SLinus Torvalds 32491da177e4SLinus Torvalds /* If this ack opens up a zero window, clear backoff. It was 32501da177e4SLinus Torvalds * being used to time the probes, and is probably far higher than 32511da177e4SLinus Torvalds * it needs to be for normal retransmission. 32521da177e4SLinus Torvalds */ 3253fe067e8aSDavid S. Miller if (tcp_send_head(sk)) 32541da177e4SLinus Torvalds tcp_ack_probe(sk); 32551da177e4SLinus Torvalds return 1; 32561da177e4SLinus Torvalds 32571da177e4SLinus Torvalds old_ack: 32581da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked) 32591da177e4SLinus Torvalds tcp_sacktag_write_queue(sk, skb, prior_snd_una); 32601da177e4SLinus Torvalds 32611da177e4SLinus Torvalds uninteresting_ack: 32621da177e4SLinus Torvalds SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 32631da177e4SLinus Torvalds return 0; 32641da177e4SLinus Torvalds } 32651da177e4SLinus Torvalds 32661da177e4SLinus Torvalds /* Look for tcp options. Normally only called on SYN and SYNACK packets. 32671da177e4SLinus Torvalds * But, this can also be called on packets in the established flow when 32681da177e4SLinus Torvalds * the fast version below fails. 
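 * Options are either a lone kind octet (EOL, NOP) or a kind/length
 * pair followed by length-2 data octets:
 *
 *	+--------+--------+--------+-- ... --+
 *	|  kind  | length |        data       |
 *	+--------+--------+--------+-- ... --+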
32691da177e4SLinus Torvalds */ 3270056834d9SIlpo Järvinen void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3271056834d9SIlpo Järvinen int estab) 32721da177e4SLinus Torvalds { 32731da177e4SLinus Torvalds unsigned char *ptr; 3274aa8223c7SArnaldo Carvalho de Melo struct tcphdr *th = tcp_hdr(skb); 32751da177e4SLinus Torvalds int length = (th->doff * 4) - sizeof(struct tcphdr); 32761da177e4SLinus Torvalds 32771da177e4SLinus Torvalds ptr = (unsigned char *)(th + 1); 32781da177e4SLinus Torvalds opt_rx->saw_tstamp = 0; 32791da177e4SLinus Torvalds 32801da177e4SLinus Torvalds while (length > 0) { 32811da177e4SLinus Torvalds int opcode = *ptr++; 32821da177e4SLinus Torvalds int opsize; 32831da177e4SLinus Torvalds 32841da177e4SLinus Torvalds switch (opcode) { 32851da177e4SLinus Torvalds case TCPOPT_EOL: 32861da177e4SLinus Torvalds return; 32871da177e4SLinus Torvalds case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 32881da177e4SLinus Torvalds length--; 32891da177e4SLinus Torvalds continue; 32901da177e4SLinus Torvalds default: 32911da177e4SLinus Torvalds opsize = *ptr++; 32921da177e4SLinus Torvalds if (opsize < 2) /* "silly options" */ 32931da177e4SLinus Torvalds return; 32941da177e4SLinus Torvalds if (opsize > length) 32951da177e4SLinus Torvalds return; /* don't parse partial options */ 32961da177e4SLinus Torvalds switch (opcode) { 32971da177e4SLinus Torvalds case TCPOPT_MSS: 32981da177e4SLinus Torvalds if (opsize == TCPOLEN_MSS && th->syn && !estab) { 32994f3608b7SAl Viro u16 in_mss = ntohs(get_unaligned((__be16 *)ptr)); 33001da177e4SLinus Torvalds if (in_mss) { 3301f038ac8fSIlpo Järvinen if (opt_rx->user_mss && 3302f038ac8fSIlpo Järvinen opt_rx->user_mss < in_mss) 33031da177e4SLinus Torvalds in_mss = opt_rx->user_mss; 33041da177e4SLinus Torvalds opt_rx->mss_clamp = in_mss; 33051da177e4SLinus Torvalds } 33061da177e4SLinus Torvalds } 33071da177e4SLinus Torvalds break; 33081da177e4SLinus Torvalds case TCPOPT_WINDOW: 3309f038ac8fSIlpo Järvinen if (opsize == TCPOLEN_WINDOW && th->syn && 3310f038ac8fSIlpo Järvinen !estab && sysctl_tcp_window_scaling) { 33111da177e4SLinus Torvalds __u8 snd_wscale = *(__u8 *)ptr; 33121da177e4SLinus Torvalds opt_rx->wscale_ok = 1; 33131da177e4SLinus Torvalds if (snd_wscale > 14) { 33141da177e4SLinus Torvalds if (net_ratelimit()) 33151da177e4SLinus Torvalds printk(KERN_INFO "tcp_parse_options: Illegal window " 33161da177e4SLinus Torvalds "scaling value %d >14 received.\n", 33171da177e4SLinus Torvalds snd_wscale); 33181da177e4SLinus Torvalds snd_wscale = 14; 33191da177e4SLinus Torvalds } 33201da177e4SLinus Torvalds opt_rx->snd_wscale = snd_wscale; 33211da177e4SLinus Torvalds } 33221da177e4SLinus Torvalds break; 33231da177e4SLinus Torvalds case TCPOPT_TIMESTAMP: 3324f038ac8fSIlpo Järvinen if ((opsize == TCPOLEN_TIMESTAMP) && 3325f038ac8fSIlpo Järvinen ((estab && opt_rx->tstamp_ok) || 3326f038ac8fSIlpo Järvinen (!estab && sysctl_tcp_timestamps))) { 33271da177e4SLinus Torvalds opt_rx->saw_tstamp = 1; 33284f3608b7SAl Viro opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr)); 33294f3608b7SAl Viro opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4))); 33301da177e4SLinus Torvalds } 33311da177e4SLinus Torvalds break; 33321da177e4SLinus Torvalds case TCPOPT_SACK_PERM: 3333f038ac8fSIlpo Järvinen if (opsize == TCPOLEN_SACK_PERM && th->syn && 3334f038ac8fSIlpo Järvinen !estab && sysctl_tcp_sack) { 33351da177e4SLinus Torvalds opt_rx->sack_ok = 1; 33361da177e4SLinus Torvalds tcp_sack_reset(opt_rx); 33371da177e4SLinus Torvalds } 33381da177e4SLinus 
Torvalds break; 33391da177e4SLinus Torvalds 33401da177e4SLinus Torvalds case TCPOPT_SACK: 33411da177e4SLinus Torvalds if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 33421da177e4SLinus Torvalds !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 33431da177e4SLinus Torvalds opt_rx->sack_ok) { 33441da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 33451da177e4SLinus Torvalds } 3346d7ea5b91SIlpo Järvinen break; 3347cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3348cfb6eeb4SYOSHIFUJI Hideaki case TCPOPT_MD5SIG: 3349cfb6eeb4SYOSHIFUJI Hideaki /* 3350cfb6eeb4SYOSHIFUJI Hideaki * The MD5 Hash has already been 3351cfb6eeb4SYOSHIFUJI Hideaki * checked (see tcp_v{4,6}_do_rcv()). 3352cfb6eeb4SYOSHIFUJI Hideaki */ 3353cfb6eeb4SYOSHIFUJI Hideaki break; 3354cfb6eeb4SYOSHIFUJI Hideaki #endif 33553ff50b79SStephen Hemminger } 33563ff50b79SStephen Hemminger 33571da177e4SLinus Torvalds ptr += opsize-2; 33581da177e4SLinus Torvalds length -= opsize; 33593ff50b79SStephen Hemminger } 33601da177e4SLinus Torvalds } 33611da177e4SLinus Torvalds } 33621da177e4SLinus Torvalds 33631da177e4SLinus Torvalds /* Fast parse options. This hopes to only see timestamps. 33641da177e4SLinus Torvalds * If it is wrong it falls back on tcp_parse_options(). 33651da177e4SLinus Torvalds */ 336640efc6faSStephen Hemminger static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 33671da177e4SLinus Torvalds struct tcp_sock *tp) 33681da177e4SLinus Torvalds { 33691da177e4SLinus Torvalds if (th->doff == sizeof(struct tcphdr) >> 2) { 33701da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 33711da177e4SLinus Torvalds return 0; 33721da177e4SLinus Torvalds } else if (tp->rx_opt.tstamp_ok && 33731da177e4SLinus Torvalds th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 33744f3608b7SAl Viro __be32 *ptr = (__be32 *)(th + 1); 33754f3608b7SAl Viro if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 33761da177e4SLinus Torvalds | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 33771da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 1; 33781da177e4SLinus Torvalds ++ptr; 33791da177e4SLinus Torvalds tp->rx_opt.rcv_tsval = ntohl(*ptr); 33801da177e4SLinus Torvalds ++ptr; 33811da177e4SLinus Torvalds tp->rx_opt.rcv_tsecr = ntohl(*ptr); 33821da177e4SLinus Torvalds return 1; 33831da177e4SLinus Torvalds } 33841da177e4SLinus Torvalds } 33851da177e4SLinus Torvalds tcp_parse_options(skb, &tp->rx_opt, 1); 33861da177e4SLinus Torvalds return 1; 33871da177e4SLinus Torvalds } 33881da177e4SLinus Torvalds 33891da177e4SLinus Torvalds static inline void tcp_store_ts_recent(struct tcp_sock *tp) 33901da177e4SLinus Torvalds { 33911da177e4SLinus Torvalds tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 33929d729f72SJames Morris tp->rx_opt.ts_recent_stamp = get_seconds(); 33931da177e4SLinus Torvalds } 33941da177e4SLinus Torvalds 33951da177e4SLinus Torvalds static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 33961da177e4SLinus Torvalds { 33971da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 33981da177e4SLinus Torvalds /* PAWS bug workaround wrt. ACK frames, the PAWS discard 33991da177e4SLinus Torvalds * extra check below makes sure this can only happen 34001da177e4SLinus Torvalds * for pure ACK frames. -DaveM 34011da177e4SLinus Torvalds * 34021da177e4SLinus Torvalds * Not only, also it occurs for expired timestamps. 
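 * So ts_recent is refreshed only when the received value is not older
 * than the one we already hold, or when the stored stamp has gone
 * unrefreshed for ~24 days and must be considered stale.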
34031da177e4SLinus Torvalds */ 34041da177e4SLinus Torvalds 34051da177e4SLinus Torvalds if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 || 34069d729f72SJames Morris get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS) 34071da177e4SLinus Torvalds tcp_store_ts_recent(tp); 34081da177e4SLinus Torvalds } 34091da177e4SLinus Torvalds } 34101da177e4SLinus Torvalds 34111da177e4SLinus Torvalds /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 34121da177e4SLinus Torvalds * 34131da177e4SLinus Torvalds * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 34141da177e4SLinus Torvalds * it can pass through stack. So, the following predicate verifies that 34151da177e4SLinus Torvalds * this segment is not used for anything but congestion avoidance or 34161da177e4SLinus Torvalds * fast retransmit. Moreover, we even are able to eliminate most of such 34171da177e4SLinus Torvalds * second order effects, if we apply some small "replay" window (~RTO) 34181da177e4SLinus Torvalds * to timestamp space. 34191da177e4SLinus Torvalds * 34201da177e4SLinus Torvalds * All these measures still do not guarantee that we reject wrapped ACKs 34211da177e4SLinus Torvalds * on networks with high bandwidth, when sequence space is recycled fastly, 34221da177e4SLinus Torvalds * but it guarantees that such events will be very rare and do not affect 34231da177e4SLinus Torvalds * connection seriously. This doesn't look nice, but alas, PAWS is really 34241da177e4SLinus Torvalds * buggy extension. 34251da177e4SLinus Torvalds * 34261da177e4SLinus Torvalds * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 34271da177e4SLinus Torvalds * states that events when retransmit arrives after original data are rare. 34281da177e4SLinus Torvalds * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 34291da177e4SLinus Torvalds * the biggest problem on large power networks even with minor reordering. 34301da177e4SLinus Torvalds * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 34311da177e4SLinus Torvalds * up to bandwidth of 18Gigabit/sec. 8) ] 34321da177e4SLinus Torvalds */ 34331da177e4SLinus Torvalds 3434463c84b9SArnaldo Carvalho de Melo static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 34351da177e4SLinus Torvalds { 3436463c84b9SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 3437aa8223c7SArnaldo Carvalho de Melo struct tcphdr *th = tcp_hdr(skb); 34381da177e4SLinus Torvalds u32 seq = TCP_SKB_CB(skb)->seq; 34391da177e4SLinus Torvalds u32 ack = TCP_SKB_CB(skb)->ack_seq; 34401da177e4SLinus Torvalds 34411da177e4SLinus Torvalds return (/* 1. Pure ACK with correct sequence number. */ 34421da177e4SLinus Torvalds (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 34431da177e4SLinus Torvalds 34441da177e4SLinus Torvalds /* 2. ... and duplicate ACK. */ 34451da177e4SLinus Torvalds ack == tp->snd_una && 34461da177e4SLinus Torvalds 34471da177e4SLinus Torvalds /* 3. ... and does not update window. */ 34481da177e4SLinus Torvalds !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 34491da177e4SLinus Torvalds 34501da177e4SLinus Torvalds /* 4. ... and sits in replay window. 
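 * i.e. the segment's timestamp lags ts_recent by no more than
 * roughly one RTO, the small "replay" window discussed above.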
*/ 3451463c84b9SArnaldo Carvalho de Melo (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 34521da177e4SLinus Torvalds } 34531da177e4SLinus Torvalds 3454056834d9SIlpo Järvinen static inline int tcp_paws_discard(const struct sock *sk, 3455056834d9SIlpo Järvinen const struct sk_buff *skb) 34561da177e4SLinus Torvalds { 3457463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 34581da177e4SLinus Torvalds return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 34599d729f72SJames Morris get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 3460463c84b9SArnaldo Carvalho de Melo !tcp_disordered_ack(sk, skb)); 34611da177e4SLinus Torvalds } 34621da177e4SLinus Torvalds 34631da177e4SLinus Torvalds /* Check segment sequence number for validity. 34641da177e4SLinus Torvalds * 34651da177e4SLinus Torvalds * Segment controls are considered valid, if the segment 34661da177e4SLinus Torvalds * fits to the window after truncation to the window. Acceptability 34671da177e4SLinus Torvalds * of data (and SYN, FIN, of course) is checked separately. 34681da177e4SLinus Torvalds * See tcp_data_queue(), for example. 34691da177e4SLinus Torvalds * 34701da177e4SLinus Torvalds * Also, controls (RST is main one) are accepted using RCV.WUP instead 34711da177e4SLinus Torvalds * of RCV.NXT. Peer still did not advance his SND.UNA when we 34721da177e4SLinus Torvalds * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 34731da177e4SLinus Torvalds * (borrowed from freebsd) 34741da177e4SLinus Torvalds */ 34751da177e4SLinus Torvalds 34761da177e4SLinus Torvalds static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) 34771da177e4SLinus Torvalds { 34781da177e4SLinus Torvalds return !before(end_seq, tp->rcv_wup) && 34791da177e4SLinus Torvalds !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 34801da177e4SLinus Torvalds } 34811da177e4SLinus Torvalds 34821da177e4SLinus Torvalds /* When we get a reset we do this. */ 34831da177e4SLinus Torvalds static void tcp_reset(struct sock *sk) 34841da177e4SLinus Torvalds { 34851da177e4SLinus Torvalds /* We want the right error as BSD sees it (and indeed as we do). */ 34861da177e4SLinus Torvalds switch (sk->sk_state) { 34871da177e4SLinus Torvalds case TCP_SYN_SENT: 34881da177e4SLinus Torvalds sk->sk_err = ECONNREFUSED; 34891da177e4SLinus Torvalds break; 34901da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 34911da177e4SLinus Torvalds sk->sk_err = EPIPE; 34921da177e4SLinus Torvalds break; 34931da177e4SLinus Torvalds case TCP_CLOSE: 34941da177e4SLinus Torvalds return; 34951da177e4SLinus Torvalds default: 34961da177e4SLinus Torvalds sk->sk_err = ECONNRESET; 34971da177e4SLinus Torvalds } 34981da177e4SLinus Torvalds 34991da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 35001da177e4SLinus Torvalds sk->sk_error_report(sk); 35011da177e4SLinus Torvalds 35021da177e4SLinus Torvalds tcp_done(sk); 35031da177e4SLinus Torvalds } 35041da177e4SLinus Torvalds 35051da177e4SLinus Torvalds /* 35061da177e4SLinus Torvalds * Process the FIN bit. This now behaves as it is supposed to work 35071da177e4SLinus Torvalds * and the FIN takes effect when it is validly part of sequence 35081da177e4SLinus Torvalds * space. Not before when we get holes. 
35091da177e4SLinus Torvalds * 35101da177e4SLinus Torvalds * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 35111da177e4SLinus Torvalds * (and thence onto LAST-ACK and finally, CLOSE, we never enter 35121da177e4SLinus Torvalds * TIME-WAIT) 35131da177e4SLinus Torvalds * 35141da177e4SLinus Torvalds * If we are in FINWAIT-1, a received FIN indicates simultaneous 35151da177e4SLinus Torvalds * close and we go into CLOSING (and later onto TIME-WAIT) 35161da177e4SLinus Torvalds * 35171da177e4SLinus Torvalds * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 35181da177e4SLinus Torvalds */ 35191da177e4SLinus Torvalds static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) 35201da177e4SLinus Torvalds { 35211da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 35221da177e4SLinus Torvalds 3523463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 35241da177e4SLinus Torvalds 35251da177e4SLinus Torvalds sk->sk_shutdown |= RCV_SHUTDOWN; 35261da177e4SLinus Torvalds sock_set_flag(sk, SOCK_DONE); 35271da177e4SLinus Torvalds 35281da177e4SLinus Torvalds switch (sk->sk_state) { 35291da177e4SLinus Torvalds case TCP_SYN_RECV: 35301da177e4SLinus Torvalds case TCP_ESTABLISHED: 35311da177e4SLinus Torvalds /* Move to CLOSE_WAIT */ 35321da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE_WAIT); 3533463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.pingpong = 1; 35341da177e4SLinus Torvalds break; 35351da177e4SLinus Torvalds 35361da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 35371da177e4SLinus Torvalds case TCP_CLOSING: 35381da177e4SLinus Torvalds /* Received a retransmission of the FIN, do 35391da177e4SLinus Torvalds * nothing. 35401da177e4SLinus Torvalds */ 35411da177e4SLinus Torvalds break; 35421da177e4SLinus Torvalds case TCP_LAST_ACK: 35431da177e4SLinus Torvalds /* RFC793: Remain in the LAST-ACK state. */ 35441da177e4SLinus Torvalds break; 35451da177e4SLinus Torvalds 35461da177e4SLinus Torvalds case TCP_FIN_WAIT1: 35471da177e4SLinus Torvalds /* This case occurs when a simultaneous close 35481da177e4SLinus Torvalds * happens, we must ack the received FIN and 35491da177e4SLinus Torvalds * enter the CLOSING state. 35501da177e4SLinus Torvalds */ 35511da177e4SLinus Torvalds tcp_send_ack(sk); 35521da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSING); 35531da177e4SLinus Torvalds break; 35541da177e4SLinus Torvalds case TCP_FIN_WAIT2: 35551da177e4SLinus Torvalds /* Received a FIN -- send ACK and enter TIME_WAIT. */ 35561da177e4SLinus Torvalds tcp_send_ack(sk); 35571da177e4SLinus Torvalds tcp_time_wait(sk, TCP_TIME_WAIT, 0); 35581da177e4SLinus Torvalds break; 35591da177e4SLinus Torvalds default: 35601da177e4SLinus Torvalds /* Only TCP_LISTEN and TCP_CLOSE are left, in these 35611da177e4SLinus Torvalds * cases we should never reach this piece of code. 35621da177e4SLinus Torvalds */ 35631da177e4SLinus Torvalds printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 35641da177e4SLinus Torvalds __FUNCTION__, sk->sk_state); 35651da177e4SLinus Torvalds break; 35663ff50b79SStephen Hemminger } 35671da177e4SLinus Torvalds 35681da177e4SLinus Torvalds /* It _is_ possible, that we have something out-of-order _after_ FIN. 35691da177e4SLinus Torvalds * Probably, we should reset in this case. For now drop them. 
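 * Nothing legitimate can follow the FIN in sequence space, so any
 * queued out-of-order data is simply discarded.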
35701da177e4SLinus Torvalds */ 35711da177e4SLinus Torvalds __skb_queue_purge(&tp->out_of_order_queue); 3572e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 35731da177e4SLinus Torvalds tcp_sack_reset(&tp->rx_opt); 35743ab224beSHideo Aoki sk_mem_reclaim(sk); 35751da177e4SLinus Torvalds 35761da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) { 35771da177e4SLinus Torvalds sk->sk_state_change(sk); 35781da177e4SLinus Torvalds 35791da177e4SLinus Torvalds /* Do not send POLL_HUP for half duplex close. */ 35801da177e4SLinus Torvalds if (sk->sk_shutdown == SHUTDOWN_MASK || 35811da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE) 35828d8ad9d7SPavel Emelyanov sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 35831da177e4SLinus Torvalds else 35848d8ad9d7SPavel Emelyanov sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 35851da177e4SLinus Torvalds } 35861da177e4SLinus Torvalds } 35871da177e4SLinus Torvalds 3588056834d9SIlpo Järvinen static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 3589056834d9SIlpo Järvinen u32 end_seq) 35901da177e4SLinus Torvalds { 35911da177e4SLinus Torvalds if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 35921da177e4SLinus Torvalds if (before(seq, sp->start_seq)) 35931da177e4SLinus Torvalds sp->start_seq = seq; 35941da177e4SLinus Torvalds if (after(end_seq, sp->end_seq)) 35951da177e4SLinus Torvalds sp->end_seq = end_seq; 35961da177e4SLinus Torvalds return 1; 35971da177e4SLinus Torvalds } 35981da177e4SLinus Torvalds return 0; 35991da177e4SLinus Torvalds } 36001da177e4SLinus Torvalds 360140efc6faSStephen Hemminger static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) 36021da177e4SLinus Torvalds { 3603e60402d0SIlpo Järvinen if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 36041da177e4SLinus Torvalds if (before(seq, tp->rcv_nxt)) 36051da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); 36061da177e4SLinus Torvalds else 36071da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); 36081da177e4SLinus Torvalds 36091da177e4SLinus Torvalds tp->rx_opt.dsack = 1; 36101da177e4SLinus Torvalds tp->duplicate_sack[0].start_seq = seq; 36111da177e4SLinus Torvalds tp->duplicate_sack[0].end_seq = end_seq; 3612056834d9SIlpo Järvinen tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 3613056834d9SIlpo Järvinen 4 - tp->rx_opt.tstamp_ok); 36141da177e4SLinus Torvalds } 36151da177e4SLinus Torvalds } 36161da177e4SLinus Torvalds 361740efc6faSStephen Hemminger static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) 36181da177e4SLinus Torvalds { 36191da177e4SLinus Torvalds if (!tp->rx_opt.dsack) 36201da177e4SLinus Torvalds tcp_dsack_set(tp, seq, end_seq); 36211da177e4SLinus Torvalds else 36221da177e4SLinus Torvalds tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 36231da177e4SLinus Torvalds } 36241da177e4SLinus Torvalds 36251da177e4SLinus Torvalds static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 36261da177e4SLinus Torvalds { 36271da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 36281da177e4SLinus Torvalds 36291da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 36301da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 36311da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 3632463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 36331da177e4SLinus Torvalds 3634e60402d0SIlpo Järvinen if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 36351da177e4SLinus Torvalds u32 end_seq = TCP_SKB_CB(skb)->end_seq; 36361da177e4SLinus Torvalds 36371da177e4SLinus 
Torvalds if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 36381da177e4SLinus Torvalds end_seq = tp->rcv_nxt; 36391da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq); 36401da177e4SLinus Torvalds } 36411da177e4SLinus Torvalds } 36421da177e4SLinus Torvalds 36431da177e4SLinus Torvalds tcp_send_ack(sk); 36441da177e4SLinus Torvalds } 36451da177e4SLinus Torvalds 36461da177e4SLinus Torvalds /* These routines update the SACK block as out-of-order packets arrive or 36471da177e4SLinus Torvalds * in-order packets close up the sequence space. 36481da177e4SLinus Torvalds */ 36491da177e4SLinus Torvalds static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 36501da177e4SLinus Torvalds { 36511da177e4SLinus Torvalds int this_sack; 36521da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 36531da177e4SLinus Torvalds struct tcp_sack_block *swalk = sp + 1; 36541da177e4SLinus Torvalds 36551da177e4SLinus Torvalds /* See if the recent change to the first SACK eats into 36561da177e4SLinus Torvalds * or hits the sequence space of other SACK blocks, if so coalesce. 36571da177e4SLinus Torvalds */ 36581da177e4SLinus Torvalds for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 36591da177e4SLinus Torvalds if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 36601da177e4SLinus Torvalds int i; 36611da177e4SLinus Torvalds 36621da177e4SLinus Torvalds /* Zap SWALK, by moving every further SACK up by one slot. 36631da177e4SLinus Torvalds * Decrease num_sacks. 36641da177e4SLinus Torvalds */ 36651da177e4SLinus Torvalds tp->rx_opt.num_sacks--; 3666056834d9SIlpo Järvinen tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 3667056834d9SIlpo Järvinen tp->rx_opt.dsack, 3668056834d9SIlpo Järvinen 4 - tp->rx_opt.tstamp_ok); 36691da177e4SLinus Torvalds for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 36701da177e4SLinus Torvalds sp[i] = sp[i + 1]; 36711da177e4SLinus Torvalds continue; 36721da177e4SLinus Torvalds } 36731da177e4SLinus Torvalds this_sack++, swalk++; 36741da177e4SLinus Torvalds } 36751da177e4SLinus Torvalds } 36761da177e4SLinus Torvalds 3677056834d9SIlpo Järvinen static inline void tcp_sack_swap(struct tcp_sack_block *sack1, 3678056834d9SIlpo Järvinen struct tcp_sack_block *sack2) 36791da177e4SLinus Torvalds { 36801da177e4SLinus Torvalds __u32 tmp; 36811da177e4SLinus Torvalds 36821da177e4SLinus Torvalds tmp = sack1->start_seq; 36831da177e4SLinus Torvalds sack1->start_seq = sack2->start_seq; 36841da177e4SLinus Torvalds sack2->start_seq = tmp; 36851da177e4SLinus Torvalds 36861da177e4SLinus Torvalds tmp = sack1->end_seq; 36871da177e4SLinus Torvalds sack1->end_seq = sack2->end_seq; 36881da177e4SLinus Torvalds sack2->end_seq = tmp; 36891da177e4SLinus Torvalds } 36901da177e4SLinus Torvalds 36911da177e4SLinus Torvalds static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 36921da177e4SLinus Torvalds { 36931da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 36941da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 36951da177e4SLinus Torvalds int cur_sacks = tp->rx_opt.num_sacks; 36961da177e4SLinus Torvalds int this_sack; 36971da177e4SLinus Torvalds 36981da177e4SLinus Torvalds if (!cur_sacks) 36991da177e4SLinus Torvalds goto new_sack; 37001da177e4SLinus Torvalds 37011da177e4SLinus Torvalds for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 37021da177e4SLinus Torvalds if (tcp_sack_extend(sp, seq, end_seq)) { 37031da177e4SLinus Torvalds /* Rotate this_sack to the first one. 
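 * RFC 2018 wants the block containing the most recently received
 * segment reported first, so bubble the freshly extended block to
 * the front of the array.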
*/ 37041da177e4SLinus Torvalds for (; this_sack > 0; this_sack--, sp--) 37051da177e4SLinus Torvalds tcp_sack_swap(sp, sp - 1); 37061da177e4SLinus Torvalds if (cur_sacks > 1) 37071da177e4SLinus Torvalds tcp_sack_maybe_coalesce(tp); 37081da177e4SLinus Torvalds return; 37091da177e4SLinus Torvalds } 37101da177e4SLinus Torvalds } 37111da177e4SLinus Torvalds 37121da177e4SLinus Torvalds /* Could not find an adjacent existing SACK, build a new one, 37131da177e4SLinus Torvalds * put it at the front, and shift everyone else down. We 37141da177e4SLinus Torvalds * always know there is at least one SACK present already here. 37151da177e4SLinus Torvalds * 37161da177e4SLinus Torvalds * If the sack array is full, forget about the last one. 37171da177e4SLinus Torvalds */ 37181da177e4SLinus Torvalds if (this_sack >= 4) { 37191da177e4SLinus Torvalds this_sack--; 37201da177e4SLinus Torvalds tp->rx_opt.num_sacks--; 37211da177e4SLinus Torvalds sp--; 37221da177e4SLinus Torvalds } 37231da177e4SLinus Torvalds for (; this_sack > 0; this_sack--, sp--) 37241da177e4SLinus Torvalds *sp = *(sp - 1); 37251da177e4SLinus Torvalds 37261da177e4SLinus Torvalds new_sack: 37271da177e4SLinus Torvalds /* Build the new head SACK, and we're done. */ 37281da177e4SLinus Torvalds sp->start_seq = seq; 37291da177e4SLinus Torvalds sp->end_seq = end_seq; 37301da177e4SLinus Torvalds tp->rx_opt.num_sacks++; 3731056834d9SIlpo Järvinen tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 3732056834d9SIlpo Järvinen 4 - tp->rx_opt.tstamp_ok); 37331da177e4SLinus Torvalds } 37341da177e4SLinus Torvalds 37351da177e4SLinus Torvalds /* RCV.NXT advances, some SACKs should be eaten. */ 37361da177e4SLinus Torvalds 37371da177e4SLinus Torvalds static void tcp_sack_remove(struct tcp_sock *tp) 37381da177e4SLinus Torvalds { 37391da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 37401da177e4SLinus Torvalds int num_sacks = tp->rx_opt.num_sacks; 37411da177e4SLinus Torvalds int this_sack; 37421da177e4SLinus Torvalds 37431da177e4SLinus Torvalds /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 3744b03efcfbSDavid S. Miller if (skb_queue_empty(&tp->out_of_order_queue)) { 37451da177e4SLinus Torvalds tp->rx_opt.num_sacks = 0; 37461da177e4SLinus Torvalds tp->rx_opt.eff_sacks = tp->rx_opt.dsack; 37471da177e4SLinus Torvalds return; 37481da177e4SLinus Torvalds } 37491da177e4SLinus Torvalds 37501da177e4SLinus Torvalds for (this_sack = 0; this_sack < num_sacks;) { 37511da177e4SLinus Torvalds /* Check if the start of the sack is covered by RCV.NXT. */ 37521da177e4SLinus Torvalds if (!before(tp->rcv_nxt, sp->start_seq)) { 37531da177e4SLinus Torvalds int i; 37541da177e4SLinus Torvalds 37551da177e4SLinus Torvalds /* RCV.NXT must cover all the block! */ 37561da177e4SLinus Torvalds BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq)); 37571da177e4SLinus Torvalds 37581da177e4SLinus Torvalds /* Zap this SACK, by moving forward any other SACKS. 
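 * Blocks that now fall entirely below rcv_nxt are covered by the
 * cumulative ACK and need not be reported any longer.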
*/ 37591da177e4SLinus Torvalds for (i=this_sack+1; i < num_sacks; i++) 37601da177e4SLinus Torvalds tp->selective_acks[i-1] = tp->selective_acks[i]; 37611da177e4SLinus Torvalds num_sacks--; 37621da177e4SLinus Torvalds continue; 37631da177e4SLinus Torvalds } 37641da177e4SLinus Torvalds this_sack++; 37651da177e4SLinus Torvalds sp++; 37661da177e4SLinus Torvalds } 37671da177e4SLinus Torvalds if (num_sacks != tp->rx_opt.num_sacks) { 37681da177e4SLinus Torvalds tp->rx_opt.num_sacks = num_sacks; 3769056834d9SIlpo Järvinen tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 3770056834d9SIlpo Järvinen tp->rx_opt.dsack, 3771056834d9SIlpo Järvinen 4 - tp->rx_opt.tstamp_ok); 37721da177e4SLinus Torvalds } 37731da177e4SLinus Torvalds } 37741da177e4SLinus Torvalds 37751da177e4SLinus Torvalds /* This one checks to see if we can put data from the 37761da177e4SLinus Torvalds * out_of_order queue into the receive_queue. 37771da177e4SLinus Torvalds */ 37781da177e4SLinus Torvalds static void tcp_ofo_queue(struct sock *sk) 37791da177e4SLinus Torvalds { 37801da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 37811da177e4SLinus Torvalds __u32 dsack_high = tp->rcv_nxt; 37821da177e4SLinus Torvalds struct sk_buff *skb; 37831da177e4SLinus Torvalds 37841da177e4SLinus Torvalds while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 37851da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 37861da177e4SLinus Torvalds break; 37871da177e4SLinus Torvalds 37881da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 37891da177e4SLinus Torvalds __u32 dsack = dsack_high; 37901da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 37911da177e4SLinus Torvalds dsack_high = TCP_SKB_CB(skb)->end_seq; 37921da177e4SLinus Torvalds tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack); 37931da177e4SLinus Torvalds } 37941da177e4SLinus Torvalds 37951da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 37961da177e4SLinus Torvalds SOCK_DEBUG(sk, "ofo packet was already received \n"); 37978728b834SDavid S. Miller __skb_unlink(skb, &tp->out_of_order_queue); 37981da177e4SLinus Torvalds __kfree_skb(skb); 37991da177e4SLinus Torvalds continue; 38001da177e4SLinus Torvalds } 38011da177e4SLinus Torvalds SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 38021da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 38031da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq); 38041da177e4SLinus Torvalds 38058728b834SDavid S. 
Miller __skb_unlink(skb, &tp->out_of_order_queue); 38061da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 38071da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 3808aa8223c7SArnaldo Carvalho de Melo if (tcp_hdr(skb)->fin) 3809aa8223c7SArnaldo Carvalho de Melo tcp_fin(skb, sk, tcp_hdr(skb)); 38101da177e4SLinus Torvalds } 38111da177e4SLinus Torvalds } 38121da177e4SLinus Torvalds 38131da177e4SLinus Torvalds static int tcp_prune_queue(struct sock *sk); 38141da177e4SLinus Torvalds 38151da177e4SLinus Torvalds static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 38161da177e4SLinus Torvalds { 3817aa8223c7SArnaldo Carvalho de Melo struct tcphdr *th = tcp_hdr(skb); 38181da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 38191da177e4SLinus Torvalds int eaten = -1; 38201da177e4SLinus Torvalds 38211da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 38221da177e4SLinus Torvalds goto drop; 38231da177e4SLinus Torvalds 38241da177e4SLinus Torvalds __skb_pull(skb, th->doff * 4); 38251da177e4SLinus Torvalds 38261da177e4SLinus Torvalds TCP_ECN_accept_cwr(tp, skb); 38271da177e4SLinus Torvalds 38281da177e4SLinus Torvalds if (tp->rx_opt.dsack) { 38291da177e4SLinus Torvalds tp->rx_opt.dsack = 0; 38301da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks, 38311da177e4SLinus Torvalds 4 - tp->rx_opt.tstamp_ok); 38321da177e4SLinus Torvalds } 38331da177e4SLinus Torvalds 38341da177e4SLinus Torvalds /* Queue data for delivery to the user. 38351da177e4SLinus Torvalds * Packets in sequence go to the receive queue. 38361da177e4SLinus Torvalds * Out of sequence packets to the out_of_order_queue. 38371da177e4SLinus Torvalds */ 38381da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 38391da177e4SLinus Torvalds if (tcp_receive_window(tp) == 0) 38401da177e4SLinus Torvalds goto out_of_window; 38411da177e4SLinus Torvalds 38421da177e4SLinus Torvalds /* Ok. In sequence. In window. 
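 * Direct-copy fast path: the task that owns the socket registered
 * its iovec in tp->ucopy (see tcp_recvmsg), so the payload can be
 * copied straight to user space, bypassing the receive queue.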
*/ 38431da177e4SLinus Torvalds if (tp->ucopy.task == current && 38441da177e4SLinus Torvalds tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 38451da177e4SLinus Torvalds sock_owned_by_user(sk) && !tp->urg_data) { 38461da177e4SLinus Torvalds int chunk = min_t(unsigned int, skb->len, 38471da177e4SLinus Torvalds tp->ucopy.len); 38481da177e4SLinus Torvalds 38491da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 38501da177e4SLinus Torvalds 38511da177e4SLinus Torvalds local_bh_enable(); 38521da177e4SLinus Torvalds if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 38531da177e4SLinus Torvalds tp->ucopy.len -= chunk; 38541da177e4SLinus Torvalds tp->copied_seq += chunk; 38551da177e4SLinus Torvalds eaten = (chunk == skb->len && !th->fin); 38561da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 38571da177e4SLinus Torvalds } 38581da177e4SLinus Torvalds local_bh_disable(); 38591da177e4SLinus Torvalds } 38601da177e4SLinus Torvalds 38611da177e4SLinus Torvalds if (eaten <= 0) { 38621da177e4SLinus Torvalds queue_and_out: 38631da177e4SLinus Torvalds if (eaten < 0 && 38641da177e4SLinus Torvalds (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 38653ab224beSHideo Aoki !sk_rmem_schedule(sk, skb->truesize))) { 38661da177e4SLinus Torvalds if (tcp_prune_queue(sk) < 0 || 38673ab224beSHideo Aoki !sk_rmem_schedule(sk, skb->truesize)) 38681da177e4SLinus Torvalds goto drop; 38691da177e4SLinus Torvalds } 38703ab224beSHideo Aoki skb_set_owner_r(skb, sk); 38711da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 38721da177e4SLinus Torvalds } 38731da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 38741da177e4SLinus Torvalds if (skb->len) 38759e412ba7SIlpo Järvinen tcp_event_data_recv(sk, skb); 38761da177e4SLinus Torvalds if (th->fin) 38771da177e4SLinus Torvalds tcp_fin(skb, sk, th); 38781da177e4SLinus Torvalds 3879b03efcfbSDavid S. Miller if (!skb_queue_empty(&tp->out_of_order_queue)) { 38801da177e4SLinus Torvalds tcp_ofo_queue(sk); 38811da177e4SLinus Torvalds 38821da177e4SLinus Torvalds /* RFC2581. 4.2. SHOULD send immediate ACK, when 38831da177e4SLinus Torvalds * gap in queue is filled. 38841da177e4SLinus Torvalds */ 3885b03efcfbSDavid S. Miller if (skb_queue_empty(&tp->out_of_order_queue)) 3886463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.pingpong = 0; 38871da177e4SLinus Torvalds } 38881da177e4SLinus Torvalds 38891da177e4SLinus Torvalds if (tp->rx_opt.num_sacks) 38901da177e4SLinus Torvalds tcp_sack_remove(tp); 38911da177e4SLinus Torvalds 38929e412ba7SIlpo Järvinen tcp_fast_path_check(sk); 38931da177e4SLinus Torvalds 38941da177e4SLinus Torvalds if (eaten > 0) 38951da177e4SLinus Torvalds __kfree_skb(skb); 38961da177e4SLinus Torvalds else if (!sock_flag(sk, SOCK_DEAD)) 38971da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 38981da177e4SLinus Torvalds return; 38991da177e4SLinus Torvalds } 39001da177e4SLinus Torvalds 39011da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 39021da177e4SLinus Torvalds /* A retransmit, 2nd most common case. Force an immediate ack. 
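 * When the peer negotiated SACK, the duplicate range is also echoed
 * back as a D-SACK block, helping the sender spot spurious
 * retransmissions.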
*/ 39031da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 39041da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 39051da177e4SLinus Torvalds 39061da177e4SLinus Torvalds out_of_window: 3907463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 3908463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 39091da177e4SLinus Torvalds drop: 39101da177e4SLinus Torvalds __kfree_skb(skb); 39111da177e4SLinus Torvalds return; 39121da177e4SLinus Torvalds } 39131da177e4SLinus Torvalds 39141da177e4SLinus Torvalds /* Out of window. F.e. zero window probe. */ 39151da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 39161da177e4SLinus Torvalds goto out_of_window; 39171da177e4SLinus Torvalds 3918463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 39191da177e4SLinus Torvalds 39201da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 39211da177e4SLinus Torvalds /* Partial packet, seq < rcv_next < end_seq */ 39221da177e4SLinus Torvalds SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 39231da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 39241da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq); 39251da177e4SLinus Torvalds 39261da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 39271da177e4SLinus Torvalds 39281da177e4SLinus Torvalds /* If window is closed, drop tail of packet. But after 39291da177e4SLinus Torvalds * remembering D-SACK for its head made in previous line. 39301da177e4SLinus Torvalds */ 39311da177e4SLinus Torvalds if (!tcp_receive_window(tp)) 39321da177e4SLinus Torvalds goto out_of_window; 39331da177e4SLinus Torvalds goto queue_and_out; 39341da177e4SLinus Torvalds } 39351da177e4SLinus Torvalds 39361da177e4SLinus Torvalds TCP_ECN_check_ce(tp, skb); 39371da177e4SLinus Torvalds 39381da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 39393ab224beSHideo Aoki !sk_rmem_schedule(sk, skb->truesize)) { 39401da177e4SLinus Torvalds if (tcp_prune_queue(sk) < 0 || 39413ab224beSHideo Aoki !sk_rmem_schedule(sk, skb->truesize)) 39421da177e4SLinus Torvalds goto drop; 39431da177e4SLinus Torvalds } 39441da177e4SLinus Torvalds 39451da177e4SLinus Torvalds /* Disable header prediction. */ 39461da177e4SLinus Torvalds tp->pred_flags = 0; 3947463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 39481da177e4SLinus Torvalds 39491da177e4SLinus Torvalds SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 39501da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 39511da177e4SLinus Torvalds 39523ab224beSHideo Aoki skb_set_owner_r(skb, sk); 39531da177e4SLinus Torvalds 39541da177e4SLinus Torvalds if (!skb_peek(&tp->out_of_order_queue)) { 39551da177e4SLinus Torvalds /* Initial out of order segment, build 1 SACK. 
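 * Per RFC 2018 the single block covers exactly the segment that
 * triggered this duplicate ACK.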
*/ 3956e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) { 39571da177e4SLinus Torvalds tp->rx_opt.num_sacks = 1; 39581da177e4SLinus Torvalds tp->rx_opt.dsack = 0; 39591da177e4SLinus Torvalds tp->rx_opt.eff_sacks = 1; 39601da177e4SLinus Torvalds tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 39611da177e4SLinus Torvalds tp->selective_acks[0].end_seq = 39621da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq; 39631da177e4SLinus Torvalds } 39641da177e4SLinus Torvalds __skb_queue_head(&tp->out_of_order_queue, skb); 39651da177e4SLinus Torvalds } else { 39661da177e4SLinus Torvalds struct sk_buff *skb1 = tp->out_of_order_queue.prev; 39671da177e4SLinus Torvalds u32 seq = TCP_SKB_CB(skb)->seq; 39681da177e4SLinus Torvalds u32 end_seq = TCP_SKB_CB(skb)->end_seq; 39691da177e4SLinus Torvalds 39701da177e4SLinus Torvalds if (seq == TCP_SKB_CB(skb1)->end_seq) { 39718728b834SDavid S. Miller __skb_append(skb1, skb, &tp->out_of_order_queue); 39721da177e4SLinus Torvalds 39731da177e4SLinus Torvalds if (!tp->rx_opt.num_sacks || 39741da177e4SLinus Torvalds tp->selective_acks[0].end_seq != seq) 39751da177e4SLinus Torvalds goto add_sack; 39761da177e4SLinus Torvalds 39771da177e4SLinus Torvalds /* Common case: data arrive in order after hole. */ 39781da177e4SLinus Torvalds tp->selective_acks[0].end_seq = end_seq; 39791da177e4SLinus Torvalds return; 39801da177e4SLinus Torvalds } 39811da177e4SLinus Torvalds 39821da177e4SLinus Torvalds /* Find place to insert this segment. */ 39831da177e4SLinus Torvalds do { 39841da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb1)->seq, seq)) 39851da177e4SLinus Torvalds break; 39861da177e4SLinus Torvalds } while ((skb1 = skb1->prev) != 39871da177e4SLinus Torvalds (struct sk_buff *)&tp->out_of_order_queue); 39881da177e4SLinus Torvalds 39891da177e4SLinus Torvalds /* Do skb overlap to previous one? */ 39901da177e4SLinus Torvalds if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && 39911da177e4SLinus Torvalds before(seq, TCP_SKB_CB(skb1)->end_seq)) { 39921da177e4SLinus Torvalds if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 39931da177e4SLinus Torvalds /* All the bits are present. Drop. */ 39941da177e4SLinus Torvalds __kfree_skb(skb); 39951da177e4SLinus Torvalds tcp_dsack_set(tp, seq, end_seq); 39961da177e4SLinus Torvalds goto add_sack; 39971da177e4SLinus Torvalds } 39981da177e4SLinus Torvalds if (after(seq, TCP_SKB_CB(skb1)->seq)) { 39991da177e4SLinus Torvalds /* Partial overlap. */ 4000056834d9SIlpo Järvinen tcp_dsack_set(tp, seq, 4001056834d9SIlpo Järvinen TCP_SKB_CB(skb1)->end_seq); 40021da177e4SLinus Torvalds } else { 40031da177e4SLinus Torvalds skb1 = skb1->prev; 40041da177e4SLinus Torvalds } 40051da177e4SLinus Torvalds } 40061da177e4SLinus Torvalds __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 40071da177e4SLinus Torvalds 40081da177e4SLinus Torvalds /* And clean segments covered by new one as whole. */ 40091da177e4SLinus Torvalds while ((skb1 = skb->next) != 40101da177e4SLinus Torvalds (struct sk_buff *)&tp->out_of_order_queue && 40111da177e4SLinus Torvalds after(end_seq, TCP_SKB_CB(skb1)->seq)) { 40121da177e4SLinus Torvalds if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4013056834d9SIlpo Järvinen tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, 4014056834d9SIlpo Järvinen end_seq); 40151da177e4SLinus Torvalds break; 40161da177e4SLinus Torvalds } 40178728b834SDavid S. 
Miller __skb_unlink(skb1, &tp->out_of_order_queue); 4018056834d9SIlpo Järvinen tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, 4019056834d9SIlpo Järvinen TCP_SKB_CB(skb1)->end_seq); 40201da177e4SLinus Torvalds __kfree_skb(skb1); 40211da177e4SLinus Torvalds } 40221da177e4SLinus Torvalds 40231da177e4SLinus Torvalds add_sack: 4024e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 40251da177e4SLinus Torvalds tcp_sack_new_ofo_skb(sk, seq, end_seq); 40261da177e4SLinus Torvalds } 40271da177e4SLinus Torvalds } 40281da177e4SLinus Torvalds 40291da177e4SLinus Torvalds /* Collapse contiguous sequence of skbs head..tail with 40301da177e4SLinus Torvalds * sequence numbers start..end. 40311da177e4SLinus Torvalds * Segments with FIN/SYN are not collapsed (only because this 40321da177e4SLinus Torvalds * simplifies code) 40331da177e4SLinus Torvalds */ 40341da177e4SLinus Torvalds static void 40358728b834SDavid S. Miller tcp_collapse(struct sock *sk, struct sk_buff_head *list, 40368728b834SDavid S. Miller struct sk_buff *head, struct sk_buff *tail, 40378728b834SDavid S. Miller u32 start, u32 end) 40381da177e4SLinus Torvalds { 40391da177e4SLinus Torvalds struct sk_buff *skb; 40401da177e4SLinus Torvalds 4041caa20d9aSStephen Hemminger /* First, check that queue is collapsible and find 40421da177e4SLinus Torvalds * the point where collapsing can be useful. */ 40431da177e4SLinus Torvalds for (skb = head; skb != tail;) { 40441da177e4SLinus Torvalds /* No new bits? It is possible on ofo queue. */ 40451da177e4SLinus Torvalds if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 40461da177e4SLinus Torvalds struct sk_buff *next = skb->next; 40478728b834SDavid S. Miller __skb_unlink(skb, list); 40481da177e4SLinus Torvalds __kfree_skb(skb); 40491da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 40501da177e4SLinus Torvalds skb = next; 40511da177e4SLinus Torvalds continue; 40521da177e4SLinus Torvalds } 40531da177e4SLinus Torvalds 40541da177e4SLinus Torvalds /* The first skb to collapse is: 40551da177e4SLinus Torvalds * - not SYN/FIN and 40561da177e4SLinus Torvalds * - bloated or contains data before "start" or 40571da177e4SLinus Torvalds * overlaps to the next one. 40581da177e4SLinus Torvalds */ 4059aa8223c7SArnaldo Carvalho de Melo if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 40601da177e4SLinus Torvalds (tcp_win_from_space(skb->truesize) > skb->len || 40611da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, start) || 40621da177e4SLinus Torvalds (skb->next != tail && 40631da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq))) 40641da177e4SLinus Torvalds break; 40651da177e4SLinus Torvalds 40661da177e4SLinus Torvalds /* Decided to skip this, advance start seq. */ 40671da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->end_seq; 40681da177e4SLinus Torvalds skb = skb->next; 40691da177e4SLinus Torvalds } 4070aa8223c7SArnaldo Carvalho de Melo if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 40711da177e4SLinus Torvalds return; 40721da177e4SLinus Torvalds 40731da177e4SLinus Torvalds while (before(start, end)) { 40741da177e4SLinus Torvalds struct sk_buff *nskb; 4075c2636b4dSChuck Lever unsigned int header = skb_headroom(skb); 40761da177e4SLinus Torvalds int copy = SKB_MAX_ORDER(header, 0); 40771da177e4SLinus Torvalds 40781da177e4SLinus Torvalds /* Too big header? This can happen with IPv6. 
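 * SKB_MAX_ORDER() can come out negative when the headroom alone
 * exceeds what an order-0 allocation can hold, so give up rather
 * than collapse.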
*/ 40791da177e4SLinus Torvalds if (copy < 0) 40801da177e4SLinus Torvalds return; 40811da177e4SLinus Torvalds if (end - start < copy) 40821da177e4SLinus Torvalds copy = end - start; 40831da177e4SLinus Torvalds nskb = alloc_skb(copy + header, GFP_ATOMIC); 40841da177e4SLinus Torvalds if (!nskb) 40851da177e4SLinus Torvalds return; 4086c51957daSArnaldo Carvalho de Melo 408798e399f8SArnaldo Carvalho de Melo skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 40889c70220bSArnaldo Carvalho de Melo skb_set_network_header(nskb, (skb_network_header(skb) - 40899c70220bSArnaldo Carvalho de Melo skb->head)); 40909c70220bSArnaldo Carvalho de Melo skb_set_transport_header(nskb, (skb_transport_header(skb) - 40919c70220bSArnaldo Carvalho de Melo skb->head)); 40921da177e4SLinus Torvalds skb_reserve(nskb, header); 40931da177e4SLinus Torvalds memcpy(nskb->head, skb->head, header); 40941da177e4SLinus Torvalds memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 40951da177e4SLinus Torvalds TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 40968728b834SDavid S. Miller __skb_insert(nskb, skb->prev, skb, list); 40973ab224beSHideo Aoki skb_set_owner_r(nskb, sk); 40981da177e4SLinus Torvalds 40991da177e4SLinus Torvalds /* Copy data, releasing collapsed skbs. */ 41001da177e4SLinus Torvalds while (copy > 0) { 41011da177e4SLinus Torvalds int offset = start - TCP_SKB_CB(skb)->seq; 41021da177e4SLinus Torvalds int size = TCP_SKB_CB(skb)->end_seq - start; 41031da177e4SLinus Torvalds 410409a62660SKris Katterjohn BUG_ON(offset < 0); 41051da177e4SLinus Torvalds if (size > 0) { 41061da177e4SLinus Torvalds size = min(copy, size); 41071da177e4SLinus Torvalds if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 41081da177e4SLinus Torvalds BUG(); 41091da177e4SLinus Torvalds TCP_SKB_CB(nskb)->end_seq += size; 41101da177e4SLinus Torvalds copy -= size; 41111da177e4SLinus Torvalds start += size; 41121da177e4SLinus Torvalds } 41131da177e4SLinus Torvalds if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 41141da177e4SLinus Torvalds struct sk_buff *next = skb->next; 41158728b834SDavid S. Miller __skb_unlink(skb, list); 41161da177e4SLinus Torvalds __kfree_skb(skb); 41171da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 41181da177e4SLinus Torvalds skb = next; 4119aa8223c7SArnaldo Carvalho de Melo if (skb == tail || 4120aa8223c7SArnaldo Carvalho de Melo tcp_hdr(skb)->syn || 4121aa8223c7SArnaldo Carvalho de Melo tcp_hdr(skb)->fin) 41221da177e4SLinus Torvalds return; 41231da177e4SLinus Torvalds } 41241da177e4SLinus Torvalds } 41251da177e4SLinus Torvalds } 41261da177e4SLinus Torvalds } 41271da177e4SLinus Torvalds 41281da177e4SLinus Torvalds /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 41291da177e4SLinus Torvalds * and tcp_collapse() them until all the queue is collapsed. 
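 * "Contiguous" means each skb overlaps or abuts the running
 * [start, end) range; a gap ends the run and begins a new one.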
41301da177e4SLinus Torvalds */ 41311da177e4SLinus Torvalds static void tcp_collapse_ofo_queue(struct sock *sk) 41321da177e4SLinus Torvalds { 41331da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 41341da177e4SLinus Torvalds struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 41351da177e4SLinus Torvalds struct sk_buff *head; 41361da177e4SLinus Torvalds u32 start, end; 41371da177e4SLinus Torvalds 41381da177e4SLinus Torvalds if (skb == NULL) 41391da177e4SLinus Torvalds return; 41401da177e4SLinus Torvalds 41411da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 41421da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 41431da177e4SLinus Torvalds head = skb; 41441da177e4SLinus Torvalds 41451da177e4SLinus Torvalds for (;;) { 41461da177e4SLinus Torvalds skb = skb->next; 41471da177e4SLinus Torvalds 41481da177e4SLinus Torvalds /* Segment is terminated when we see gap or when 41491da177e4SLinus Torvalds * we are at the end of all the queue. */ 41501da177e4SLinus Torvalds if (skb == (struct sk_buff *)&tp->out_of_order_queue || 41511da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->seq, end) || 41521da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->end_seq, start)) { 41538728b834SDavid S. Miller tcp_collapse(sk, &tp->out_of_order_queue, 41548728b834SDavid S. Miller head, skb, start, end); 41551da177e4SLinus Torvalds head = skb; 41561da177e4SLinus Torvalds if (skb == (struct sk_buff *)&tp->out_of_order_queue) 41571da177e4SLinus Torvalds break; 41581da177e4SLinus Torvalds /* Start new segment */ 41591da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 41601da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 41611da177e4SLinus Torvalds } else { 41621da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, start)) 41631da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 41641da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->end_seq, end)) 41651da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 41661da177e4SLinus Torvalds } 41671da177e4SLinus Torvalds } 41681da177e4SLinus Torvalds } 41691da177e4SLinus Torvalds 41701da177e4SLinus Torvalds /* Reduce allocated memory if we can, trying to get 41711da177e4SLinus Torvalds * the socket within its memory limits again. 41721da177e4SLinus Torvalds * 41731da177e4SLinus Torvalds * Return less than zero if we should start dropping frames 41741da177e4SLinus Torvalds * until the socket owning process reads some of the data 41751da177e4SLinus Torvalds * to stabilize the situation. 41761da177e4SLinus Torvalds */ 41771da177e4SLinus Torvalds static int tcp_prune_queue(struct sock *sk) 41781da177e4SLinus Torvalds { 41791da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 41801da177e4SLinus Torvalds 41811da177e4SLinus Torvalds SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 41821da177e4SLinus Torvalds 41831da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); 41841da177e4SLinus Torvalds 41851da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 41869e412ba7SIlpo Järvinen tcp_clamp_window(sk); 41871da177e4SLinus Torvalds else if (tcp_memory_pressure) 41881da177e4SLinus Torvalds tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 41891da177e4SLinus Torvalds 41901da177e4SLinus Torvalds tcp_collapse_ofo_queue(sk); 41918728b834SDavid S. Miller tcp_collapse(sk, &sk->sk_receive_queue, 41928728b834SDavid S. 
Miller sk->sk_receive_queue.next, 41931da177e4SLinus Torvalds (struct sk_buff *)&sk->sk_receive_queue, 41941da177e4SLinus Torvalds tp->copied_seq, tp->rcv_nxt); 41953ab224beSHideo Aoki sk_mem_reclaim(sk); 41961da177e4SLinus Torvalds 41971da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 41981da177e4SLinus Torvalds return 0; 41991da177e4SLinus Torvalds 42001da177e4SLinus Torvalds /* Collapsing did not help, destructive actions follow. 42011da177e4SLinus Torvalds * This must not ever occur. */ 42021da177e4SLinus Torvalds 42031da177e4SLinus Torvalds /* First, purge the out_of_order queue. */ 4204b03efcfbSDavid S. Miller if (!skb_queue_empty(&tp->out_of_order_queue)) { 4205b03efcfbSDavid S. Miller NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); 42061da177e4SLinus Torvalds __skb_queue_purge(&tp->out_of_order_queue); 42071da177e4SLinus Torvalds 42081da177e4SLinus Torvalds /* Reset SACK state. A conforming SACK implementation will 42091da177e4SLinus Torvalds * do the same at a timeout based retransmit. When a connection 42101da177e4SLinus Torvalds * is in a sad state like this, we care only about integrity 42111da177e4SLinus Torvalds * of the connection not performance. 42121da177e4SLinus Torvalds */ 4213e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 42141da177e4SLinus Torvalds tcp_sack_reset(&tp->rx_opt); 42153ab224beSHideo Aoki sk_mem_reclaim(sk); 42161da177e4SLinus Torvalds } 42171da177e4SLinus Torvalds 42181da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 42191da177e4SLinus Torvalds return 0; 42201da177e4SLinus Torvalds 42211da177e4SLinus Torvalds /* If we are really being abused, tell the caller to silently 42221da177e4SLinus Torvalds * drop receive data on the floor. It will get retransmitted 42231da177e4SLinus Torvalds * and hopefully then we'll have sufficient space. 42241da177e4SLinus Torvalds */ 42251da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED); 42261da177e4SLinus Torvalds 42271da177e4SLinus Torvalds /* Massive buffer overcommit. */ 42281da177e4SLinus Torvalds tp->pred_flags = 0; 42291da177e4SLinus Torvalds return -1; 42301da177e4SLinus Torvalds } 42311da177e4SLinus Torvalds 42321da177e4SLinus Torvalds /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 42331da177e4SLinus Torvalds * As additional protections, we do not touch cwnd in retransmission phases, 42341da177e4SLinus Torvalds * and if application hit its sndbuf limit recently. 42351da177e4SLinus Torvalds */ 42361da177e4SLinus Torvalds void tcp_cwnd_application_limited(struct sock *sk) 42371da177e4SLinus Torvalds { 42381da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 42391da177e4SLinus Torvalds 42406687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 42411da177e4SLinus Torvalds sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 42421da177e4SLinus Torvalds /* Limited by application or receiver window. 
*/ 4243d254bcdbSIlpo Järvinen u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 4244d254bcdbSIlpo Järvinen u32 win_used = max(tp->snd_cwnd_used, init_win); 42451da177e4SLinus Torvalds if (win_used < tp->snd_cwnd) { 42466687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = tcp_current_ssthresh(sk); 42471da177e4SLinus Torvalds tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 42481da177e4SLinus Torvalds } 42491da177e4SLinus Torvalds tp->snd_cwnd_used = 0; 42501da177e4SLinus Torvalds } 42511da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 42521da177e4SLinus Torvalds } 42531da177e4SLinus Torvalds 42549e412ba7SIlpo Järvinen static int tcp_should_expand_sndbuf(struct sock *sk) 42550d9901dfSDavid S. Miller { 42569e412ba7SIlpo Järvinen struct tcp_sock *tp = tcp_sk(sk); 42579e412ba7SIlpo Järvinen 42580d9901dfSDavid S. Miller /* If the user specified a specific send buffer setting, do 42590d9901dfSDavid S. Miller * not modify it. 42600d9901dfSDavid S. Miller */ 42610d9901dfSDavid S. Miller if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 42620d9901dfSDavid S. Miller return 0; 42630d9901dfSDavid S. Miller 42640d9901dfSDavid S. Miller /* If we are under global TCP memory pressure, do not expand. */ 42650d9901dfSDavid S. Miller if (tcp_memory_pressure) 42660d9901dfSDavid S. Miller return 0; 42670d9901dfSDavid S. Miller 42680d9901dfSDavid S. Miller /* If we are under soft global TCP memory pressure, do not expand. */ 42690d9901dfSDavid S. Miller if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 42700d9901dfSDavid S. Miller return 0; 42710d9901dfSDavid S. Miller 42720d9901dfSDavid S. Miller /* If we filled the congestion window, do not expand. */ 42730d9901dfSDavid S. Miller if (tp->packets_out >= tp->snd_cwnd) 42740d9901dfSDavid S. Miller return 0; 42750d9901dfSDavid S. Miller 42760d9901dfSDavid S. Miller return 1; 42770d9901dfSDavid S. Miller } 42781da177e4SLinus Torvalds 42791da177e4SLinus Torvalds /* When incoming ACK allowed to free some skb from write_queue, 42801da177e4SLinus Torvalds * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 42811da177e4SLinus Torvalds * on the exit from tcp input handler. 42821da177e4SLinus Torvalds * 42831da177e4SLinus Torvalds * PROBLEM: sndbuf expansion does not work well with largesend. 42841da177e4SLinus Torvalds */ 42851da177e4SLinus Torvalds static void tcp_new_space(struct sock *sk) 42861da177e4SLinus Torvalds { 42871da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 42881da177e4SLinus Torvalds 42899e412ba7SIlpo Järvinen if (tcp_should_expand_sndbuf(sk)) { 4290c1b4a7e6SDavid S. 
Miller int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 42911da177e4SLinus Torvalds MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), 42921da177e4SLinus Torvalds demanded = max_t(unsigned int, tp->snd_cwnd, 42931da177e4SLinus Torvalds tp->reordering + 1); 42941da177e4SLinus Torvalds sndmem *= 2 * demanded; 42951da177e4SLinus Torvalds if (sndmem > sk->sk_sndbuf) 42961da177e4SLinus Torvalds sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 42971da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 42981da177e4SLinus Torvalds } 42991da177e4SLinus Torvalds 43001da177e4SLinus Torvalds sk->sk_write_space(sk); 43011da177e4SLinus Torvalds } 43021da177e4SLinus Torvalds 430340efc6faSStephen Hemminger static void tcp_check_space(struct sock *sk) 43041da177e4SLinus Torvalds { 43051da177e4SLinus Torvalds if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 43061da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 43071da177e4SLinus Torvalds if (sk->sk_socket && 43081da177e4SLinus Torvalds test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 43091da177e4SLinus Torvalds tcp_new_space(sk); 43101da177e4SLinus Torvalds } 43111da177e4SLinus Torvalds } 43121da177e4SLinus Torvalds 43139e412ba7SIlpo Järvinen static inline void tcp_data_snd_check(struct sock *sk) 43141da177e4SLinus Torvalds { 43159e412ba7SIlpo Järvinen tcp_push_pending_frames(sk); 43161da177e4SLinus Torvalds tcp_check_space(sk); 43171da177e4SLinus Torvalds } 43181da177e4SLinus Torvalds 43191da177e4SLinus Torvalds /* 43201da177e4SLinus Torvalds * Check if sending an ack is needed. 43211da177e4SLinus Torvalds */ 43221da177e4SLinus Torvalds static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 43231da177e4SLinus Torvalds { 43241da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 43251da177e4SLinus Torvalds 43261da177e4SLinus Torvalds /* More than one full frame received... */ 4327463c84b9SArnaldo Carvalho de Melo if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss 43281da177e4SLinus Torvalds /* ... and right edge of window advances far enough. 43291da177e4SLinus Torvalds * (tcp_recvmsg() will send ACK otherwise). Or... 43301da177e4SLinus Torvalds */ 43311da177e4SLinus Torvalds && __tcp_select_window(sk) >= tp->rcv_wnd) || 43321da177e4SLinus Torvalds /* We ACK each frame or... */ 4333463c84b9SArnaldo Carvalho de Melo tcp_in_quickack_mode(sk) || 43341da177e4SLinus Torvalds /* We have out of order data. */ 4335056834d9SIlpo Järvinen (ofo_possible && skb_peek(&tp->out_of_order_queue))) { 43361da177e4SLinus Torvalds /* Then ack it now */ 43371da177e4SLinus Torvalds tcp_send_ack(sk); 43381da177e4SLinus Torvalds } else { 43391da177e4SLinus Torvalds /* Else, send delayed ack. */ 43401da177e4SLinus Torvalds tcp_send_delayed_ack(sk); 43411da177e4SLinus Torvalds } 43421da177e4SLinus Torvalds } 43431da177e4SLinus Torvalds 434440efc6faSStephen Hemminger static inline void tcp_ack_snd_check(struct sock *sk) 43451da177e4SLinus Torvalds { 4346463c84b9SArnaldo Carvalho de Melo if (!inet_csk_ack_scheduled(sk)) { 43471da177e4SLinus Torvalds /* We sent a data segment already. */ 43481da177e4SLinus Torvalds return; 43491da177e4SLinus Torvalds } 43501da177e4SLinus Torvalds __tcp_ack_snd_check(sk, 1); 43511da177e4SLinus Torvalds } 43521da177e4SLinus Torvalds 43531da177e4SLinus Torvalds /* 43541da177e4SLinus Torvalds * This routine is only called when we have urgent data 4355caa20d9aSStephen Hemminger * signaled. Its the 'slow' part of tcp_urg. 
It could be 43561da177e4SLinus Torvalds * moved inline now as tcp_urg is only called from one 43571da177e4SLinus Torvalds * place. We handle URGent data wrong. We have to - as 43581da177e4SLinus Torvalds * BSD still doesn't use the correction from RFC961. 43591da177e4SLinus Torvalds * For 1003.1g we should support a new option TCP_STDURG to permit 43601da177e4SLinus Torvalds * either form (or just set the sysctl tcp_stdurg). 43611da177e4SLinus Torvalds */ 43621da177e4SLinus Torvalds 43631da177e4SLinus Torvalds static void tcp_check_urg(struct sock *sk, struct tcphdr *th) 43641da177e4SLinus Torvalds { 43651da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 43661da177e4SLinus Torvalds u32 ptr = ntohs(th->urg_ptr); 43671da177e4SLinus Torvalds 43681da177e4SLinus Torvalds if (ptr && !sysctl_tcp_stdurg) 43691da177e4SLinus Torvalds ptr--; 43701da177e4SLinus Torvalds ptr += ntohl(th->seq); 43711da177e4SLinus Torvalds 43721da177e4SLinus Torvalds /* Ignore urgent data that we've already seen and read. */ 43731da177e4SLinus Torvalds if (after(tp->copied_seq, ptr)) 43741da177e4SLinus Torvalds return; 43751da177e4SLinus Torvalds 43761da177e4SLinus Torvalds /* Do not replay urg ptr. 43771da177e4SLinus Torvalds * 43781da177e4SLinus Torvalds * NOTE: interesting situation not covered by specs. 43791da177e4SLinus Torvalds * Misbehaving sender may send urg ptr, pointing to segment, 43801da177e4SLinus Torvalds * which we already have in ofo queue. We are not able to fetch 43811da177e4SLinus Torvalds * such data and will stay in TCP_URG_NOTYET until will be eaten 43821da177e4SLinus Torvalds * by recvmsg(). Seems, we are not obliged to handle such wicked 43831da177e4SLinus Torvalds * situations. But it is worth to think about possibility of some 43841da177e4SLinus Torvalds * DoSes using some hypothetical application level deadlock. 43851da177e4SLinus Torvalds */ 43861da177e4SLinus Torvalds if (before(ptr, tp->rcv_nxt)) 43871da177e4SLinus Torvalds return; 43881da177e4SLinus Torvalds 43891da177e4SLinus Torvalds /* Do we already have a newer (or duplicate) urgent pointer? */ 43901da177e4SLinus Torvalds if (tp->urg_data && !after(ptr, tp->urg_seq)) 43911da177e4SLinus Torvalds return; 43921da177e4SLinus Torvalds 43931da177e4SLinus Torvalds /* Tell the world about our new urgent pointer. */ 43941da177e4SLinus Torvalds sk_send_sigurg(sk); 43951da177e4SLinus Torvalds 43961da177e4SLinus Torvalds /* We may be adding urgent data when the last byte read was 43971da177e4SLinus Torvalds * urgent. To do this requires some care. We cannot just ignore 43981da177e4SLinus Torvalds * tp->copied_seq since we would read the last urgent byte again 43991da177e4SLinus Torvalds * as data, nor can we alter copied_seq until this data arrives 4400caa20d9aSStephen Hemminger * or we break the semantics of SIOCATMARK (and thus sockatmark()) 44011da177e4SLinus Torvalds * 44021da177e4SLinus Torvalds * NOTE. Double Dutch. Rendering to plain English: author of comment 44031da177e4SLinus Torvalds * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 44041da177e4SLinus Torvalds * and expect that both A and B disappear from stream. This is _wrong_. 44051da177e4SLinus Torvalds * Though this happens in BSD with high probability, this is occasional. 44061da177e4SLinus Torvalds * Any application relying on this is buggy. Note also, that fix "works" 44071da177e4SLinus Torvalds * only in this artificial test. Insert some normal data between A and B and we will 44081da177e4SLinus Torvalds * decline of BSD again. 
Verdict: it is better to remove to trap 44091da177e4SLinus Torvalds * buggy users. 44101da177e4SLinus Torvalds */ 44111da177e4SLinus Torvalds if (tp->urg_seq == tp->copied_seq && tp->urg_data && 4412056834d9SIlpo Järvinen !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 44131da177e4SLinus Torvalds struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 44141da177e4SLinus Torvalds tp->copied_seq++; 44151da177e4SLinus Torvalds if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 44168728b834SDavid S. Miller __skb_unlink(skb, &sk->sk_receive_queue); 44171da177e4SLinus Torvalds __kfree_skb(skb); 44181da177e4SLinus Torvalds } 44191da177e4SLinus Torvalds } 44201da177e4SLinus Torvalds 44211da177e4SLinus Torvalds tp->urg_data = TCP_URG_NOTYET; 44221da177e4SLinus Torvalds tp->urg_seq = ptr; 44231da177e4SLinus Torvalds 44241da177e4SLinus Torvalds /* Disable header prediction. */ 44251da177e4SLinus Torvalds tp->pred_flags = 0; 44261da177e4SLinus Torvalds } 44271da177e4SLinus Torvalds 44281da177e4SLinus Torvalds /* This is the 'fast' part of urgent handling. */ 44291da177e4SLinus Torvalds static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) 44301da177e4SLinus Torvalds { 44311da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 44321da177e4SLinus Torvalds 44331da177e4SLinus Torvalds /* Check if we get a new urgent pointer - normally not. */ 44341da177e4SLinus Torvalds if (th->urg) 44351da177e4SLinus Torvalds tcp_check_urg(sk, th); 44361da177e4SLinus Torvalds 44371da177e4SLinus Torvalds /* Do we wait for any urgent data? - normally not... */ 44381da177e4SLinus Torvalds if (tp->urg_data == TCP_URG_NOTYET) { 44391da177e4SLinus Torvalds u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 44401da177e4SLinus Torvalds th->syn; 44411da177e4SLinus Torvalds 44421da177e4SLinus Torvalds /* Is the urgent pointer pointing into this packet? 
*/ 44431da177e4SLinus Torvalds if (ptr < skb->len) { 44441da177e4SLinus Torvalds u8 tmp; 44451da177e4SLinus Torvalds if (skb_copy_bits(skb, ptr, &tmp, 1)) 44461da177e4SLinus Torvalds BUG(); 44471da177e4SLinus Torvalds tp->urg_data = TCP_URG_VALID | tmp; 44481da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 44491da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 44501da177e4SLinus Torvalds } 44511da177e4SLinus Torvalds } 44521da177e4SLinus Torvalds } 44531da177e4SLinus Torvalds 44541da177e4SLinus Torvalds static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 44551da177e4SLinus Torvalds { 44561da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 44571da177e4SLinus Torvalds int chunk = skb->len - hlen; 44581da177e4SLinus Torvalds int err; 44591da177e4SLinus Torvalds 44601da177e4SLinus Torvalds local_bh_enable(); 446160476372SHerbert Xu if (skb_csum_unnecessary(skb)) 44621da177e4SLinus Torvalds err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); 44631da177e4SLinus Torvalds else 44641da177e4SLinus Torvalds err = skb_copy_and_csum_datagram_iovec(skb, hlen, 44651da177e4SLinus Torvalds tp->ucopy.iov); 44661da177e4SLinus Torvalds 44671da177e4SLinus Torvalds if (!err) { 44681da177e4SLinus Torvalds tp->ucopy.len -= chunk; 44691da177e4SLinus Torvalds tp->copied_seq += chunk; 44701da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 44711da177e4SLinus Torvalds } 44721da177e4SLinus Torvalds 44731da177e4SLinus Torvalds local_bh_disable(); 44741da177e4SLinus Torvalds return err; 44751da177e4SLinus Torvalds } 44761da177e4SLinus Torvalds 4477056834d9SIlpo Järvinen static __sum16 __tcp_checksum_complete_user(struct sock *sk, 4478056834d9SIlpo Järvinen struct sk_buff *skb) 44791da177e4SLinus Torvalds { 4480b51655b9SAl Viro __sum16 result; 44811da177e4SLinus Torvalds 44821da177e4SLinus Torvalds if (sock_owned_by_user(sk)) { 44831da177e4SLinus Torvalds local_bh_enable(); 44841da177e4SLinus Torvalds result = __tcp_checksum_complete(skb); 44851da177e4SLinus Torvalds local_bh_disable(); 44861da177e4SLinus Torvalds } else { 44871da177e4SLinus Torvalds result = __tcp_checksum_complete(skb); 44881da177e4SLinus Torvalds } 44891da177e4SLinus Torvalds return result; 44901da177e4SLinus Torvalds } 44911da177e4SLinus Torvalds 4492056834d9SIlpo Järvinen static inline int tcp_checksum_complete_user(struct sock *sk, 4493056834d9SIlpo Järvinen struct sk_buff *skb) 44941da177e4SLinus Torvalds { 449560476372SHerbert Xu return !skb_csum_unnecessary(skb) && 44961da177e4SLinus Torvalds __tcp_checksum_complete_user(sk, skb); 44971da177e4SLinus Torvalds } 44981da177e4SLinus Torvalds 44991a2449a8SChris Leech #ifdef CONFIG_NET_DMA 4500056834d9SIlpo Järvinen static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, 4501056834d9SIlpo Järvinen int hlen) 45021a2449a8SChris Leech { 45031a2449a8SChris Leech struct tcp_sock *tp = tcp_sk(sk); 45041a2449a8SChris Leech int chunk = skb->len - hlen; 45051a2449a8SChris Leech int dma_cookie; 45061a2449a8SChris Leech int copied_early = 0; 45071a2449a8SChris Leech 45081a2449a8SChris Leech if (tp->ucopy.wakeup) 45091a2449a8SChris Leech return 0; 45101a2449a8SChris Leech 45111a2449a8SChris Leech if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 45121a2449a8SChris Leech tp->ucopy.dma_chan = get_softnet_dma(); 45131a2449a8SChris Leech 451460476372SHerbert Xu if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 45151a2449a8SChris Leech 45161a2449a8SChris Leech dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, 4517056834d9SIlpo Järvinen skb, 
hlen, 4518056834d9SIlpo Järvinen tp->ucopy.iov, chunk, 4519056834d9SIlpo Järvinen tp->ucopy.pinned_list); 45201a2449a8SChris Leech 45211a2449a8SChris Leech if (dma_cookie < 0) 45221a2449a8SChris Leech goto out; 45231a2449a8SChris Leech 45241a2449a8SChris Leech tp->ucopy.dma_cookie = dma_cookie; 45251a2449a8SChris Leech copied_early = 1; 45261a2449a8SChris Leech 45271a2449a8SChris Leech tp->ucopy.len -= chunk; 45281a2449a8SChris Leech tp->copied_seq += chunk; 45291a2449a8SChris Leech tcp_rcv_space_adjust(sk); 45301a2449a8SChris Leech 45311a2449a8SChris Leech if ((tp->ucopy.len == 0) || 4532aa8223c7SArnaldo Carvalho de Melo (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 45331a2449a8SChris Leech (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 45341a2449a8SChris Leech tp->ucopy.wakeup = 1; 45351a2449a8SChris Leech sk->sk_data_ready(sk, 0); 45361a2449a8SChris Leech } 45371a2449a8SChris Leech } else if (chunk > 0) { 45381a2449a8SChris Leech tp->ucopy.wakeup = 1; 45391a2449a8SChris Leech sk->sk_data_ready(sk, 0); 45401a2449a8SChris Leech } 45411a2449a8SChris Leech out: 45421a2449a8SChris Leech return copied_early; 45431a2449a8SChris Leech } 45441a2449a8SChris Leech #endif /* CONFIG_NET_DMA */ 45451a2449a8SChris Leech 45461da177e4SLinus Torvalds /* 45471da177e4SLinus Torvalds * TCP receive function for the ESTABLISHED state. 45481da177e4SLinus Torvalds * 45491da177e4SLinus Torvalds * It is split into a fast path and a slow path. The fast path is 45501da177e4SLinus Torvalds * disabled when: 45511da177e4SLinus Torvalds * - A zero window was announced from us - zero window probing 45521da177e4SLinus Torvalds * is only handled properly in the slow path. 45531da177e4SLinus Torvalds * - Out of order segments arrived. 45541da177e4SLinus Torvalds * - Urgent data is expected. 45551da177e4SLinus Torvalds * - There is no buffer space left 45561da177e4SLinus Torvalds * - Unexpected TCP flags/window values/header lengths are received 45571da177e4SLinus Torvalds * (detected by checking the TCP header against pred_flags) 45581da177e4SLinus Torvalds * - Data is sent in both directions. Fast path only supports pure senders 45591da177e4SLinus Torvalds * or pure receivers (this means either the sequence number or the ack 45601da177e4SLinus Torvalds * value must stay constant) 45611da177e4SLinus Torvalds * - Unexpected TCP option. 45621da177e4SLinus Torvalds * 45631da177e4SLinus Torvalds * When these conditions are not satisfied it drops into a standard 45641da177e4SLinus Torvalds * receive procedure patterned after RFC793 to handle all cases. 45651da177e4SLinus Torvalds * The first three cases are guaranteed by proper pred_flags setting, 45661da177e4SLinus Torvalds * the rest is checked inline. Fast processing is turned on in 45671da177e4SLinus Torvalds * tcp_data_queue when everything is OK. 45681da177e4SLinus Torvalds */ 45691da177e4SLinus Torvalds int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 45701da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 45711da177e4SLinus Torvalds { 45721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 45731da177e4SLinus Torvalds 45741da177e4SLinus Torvalds /* 45751da177e4SLinus Torvalds * Header prediction. 45761da177e4SLinus Torvalds * The code loosely follows the one in the famous 45771da177e4SLinus Torvalds * "30 instruction TCP receive" Van Jacobson mail. 
45781da177e4SLinus Torvalds * 45791da177e4SLinus Torvalds * Van's trick is to deposit buffers into socket queue 45801da177e4SLinus Torvalds * on a device interrupt, to call tcp_recv function 45811da177e4SLinus Torvalds * on the receive process context and checksum and copy 45821da177e4SLinus Torvalds * the buffer to user space. smart... 45831da177e4SLinus Torvalds * 45841da177e4SLinus Torvalds * Our current scheme is not silly either but we take the 45851da177e4SLinus Torvalds * extra cost of the net_bh soft interrupt processing... 45861da177e4SLinus Torvalds * We do checksum and copy also but from device to kernel. 45871da177e4SLinus Torvalds */ 45881da177e4SLinus Torvalds 45891da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 45901da177e4SLinus Torvalds 45911da177e4SLinus Torvalds /* pred_flags is 0xS?10 << 16 + snd_wnd 4592caa20d9aSStephen Hemminger * if header_prediction is to be made 45931da177e4SLinus Torvalds * 'S' will always be tp->tcp_header_len >> 2 45941da177e4SLinus Torvalds * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 45951da177e4SLinus Torvalds * turn it off (when there are holes in the receive 45961da177e4SLinus Torvalds * space for instance) 45971da177e4SLinus Torvalds * PSH flag is ignored. 45981da177e4SLinus Torvalds */ 45991da177e4SLinus Torvalds 46001da177e4SLinus Torvalds if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 46011da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 46021da177e4SLinus Torvalds int tcp_header_len = tp->tcp_header_len; 46031da177e4SLinus Torvalds 46041da177e4SLinus Torvalds /* Timestamp header prediction: tcp_header_len 46051da177e4SLinus Torvalds * is automatically equal to th->doff*4 due to pred_flags 46061da177e4SLinus Torvalds * match. 46071da177e4SLinus Torvalds */ 46081da177e4SLinus Torvalds 46091da177e4SLinus Torvalds /* Check timestamp */ 46101da177e4SLinus Torvalds if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 46114f3608b7SAl Viro __be32 *ptr = (__be32 *)(th + 1); 46121da177e4SLinus Torvalds 46131da177e4SLinus Torvalds /* No? Slow path! */ 46144f3608b7SAl Viro if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 46151da177e4SLinus Torvalds | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) 46161da177e4SLinus Torvalds goto slow_path; 46171da177e4SLinus Torvalds 46181da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 1; 46191da177e4SLinus Torvalds ++ptr; 46201da177e4SLinus Torvalds tp->rx_opt.rcv_tsval = ntohl(*ptr); 46211da177e4SLinus Torvalds ++ptr; 46221da177e4SLinus Torvalds tp->rx_opt.rcv_tsecr = ntohl(*ptr); 46231da177e4SLinus Torvalds 46241da177e4SLinus Torvalds /* If PAWS failed, check it more carefully in slow path */ 46251da177e4SLinus Torvalds if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 46261da177e4SLinus Torvalds goto slow_path; 46271da177e4SLinus Torvalds 46281da177e4SLinus Torvalds /* DO NOT update ts_recent here, if checksum fails 46291da177e4SLinus Torvalds * and timestamp was corrupted part, it will result 46301da177e4SLinus Torvalds * in a hung connection since we will drop all 46311da177e4SLinus Torvalds * future packets due to the PAWS test. 46321da177e4SLinus Torvalds */ 46331da177e4SLinus Torvalds } 46341da177e4SLinus Torvalds 46351da177e4SLinus Torvalds if (len <= tcp_header_len) { 46361da177e4SLinus Torvalds /* Bulk data transfer: sender */ 46371da177e4SLinus Torvalds if (len == tcp_header_len) { 46381da177e4SLinus Torvalds /* Predicted packet is in window by definition. 
46391da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 46401da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 46411da177e4SLinus Torvalds */ 46421da177e4SLinus Torvalds if (tcp_header_len == 46431da177e4SLinus Torvalds (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 46441da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 46451da177e4SLinus Torvalds tcp_store_ts_recent(tp); 46461da177e4SLinus Torvalds 46471da177e4SLinus Torvalds /* We know that such packets are checksummed 46481da177e4SLinus Torvalds * on entry. 46491da177e4SLinus Torvalds */ 46501da177e4SLinus Torvalds tcp_ack(sk, skb, 0); 46511da177e4SLinus Torvalds __kfree_skb(skb); 46529e412ba7SIlpo Järvinen tcp_data_snd_check(sk); 46531da177e4SLinus Torvalds return 0; 46541da177e4SLinus Torvalds } else { /* Header too small */ 46551da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 46561da177e4SLinus Torvalds goto discard; 46571da177e4SLinus Torvalds } 46581da177e4SLinus Torvalds } else { 46591da177e4SLinus Torvalds int eaten = 0; 46601a2449a8SChris Leech int copied_early = 0; 46611da177e4SLinus Torvalds 46621a2449a8SChris Leech if (tp->copied_seq == tp->rcv_nxt && 46631a2449a8SChris Leech len - tcp_header_len <= tp->ucopy.len) { 46641a2449a8SChris Leech #ifdef CONFIG_NET_DMA 46651a2449a8SChris Leech if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 46661a2449a8SChris Leech copied_early = 1; 46671a2449a8SChris Leech eaten = 1; 46681a2449a8SChris Leech } 46691a2449a8SChris Leech #endif 4670056834d9SIlpo Järvinen if (tp->ucopy.task == current && 4671056834d9SIlpo Järvinen sock_owned_by_user(sk) && !copied_early) { 46721da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 46731da177e4SLinus Torvalds 46741a2449a8SChris Leech if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 46751a2449a8SChris Leech eaten = 1; 46761a2449a8SChris Leech } 46771a2449a8SChris Leech if (eaten) { 46781da177e4SLinus Torvalds /* Predicted packet is in window by definition. 46791da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 46801da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 46811da177e4SLinus Torvalds */ 46821da177e4SLinus Torvalds if (tcp_header_len == 46831da177e4SLinus Torvalds (sizeof(struct tcphdr) + 46841da177e4SLinus Torvalds TCPOLEN_TSTAMP_ALIGNED) && 46851da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 46861da177e4SLinus Torvalds tcp_store_ts_recent(tp); 46871da177e4SLinus Torvalds 4688463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 46891da177e4SLinus Torvalds 46901da177e4SLinus Torvalds __skb_pull(skb, tcp_header_len); 46911da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 46921da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER); 46931da177e4SLinus Torvalds } 46941a2449a8SChris Leech if (copied_early) 46951a2449a8SChris Leech tcp_cleanup_rbuf(sk, skb->len); 46961da177e4SLinus Torvalds } 46971da177e4SLinus Torvalds if (!eaten) { 46981da177e4SLinus Torvalds if (tcp_checksum_complete_user(sk, skb)) 46991da177e4SLinus Torvalds goto csum_error; 47001da177e4SLinus Torvalds 47011da177e4SLinus Torvalds /* Predicted packet is in window by definition. 47021da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
47031da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 47041da177e4SLinus Torvalds */ 47051da177e4SLinus Torvalds if (tcp_header_len == 47061da177e4SLinus Torvalds (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 47071da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 47081da177e4SLinus Torvalds tcp_store_ts_recent(tp); 47091da177e4SLinus Torvalds 4710463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 47111da177e4SLinus Torvalds 47121da177e4SLinus Torvalds if ((int)skb->truesize > sk->sk_forward_alloc) 47131da177e4SLinus Torvalds goto step5; 47141da177e4SLinus Torvalds 47151da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); 47161da177e4SLinus Torvalds 47171da177e4SLinus Torvalds /* Bulk data transfer: receiver */ 47181da177e4SLinus Torvalds __skb_pull(skb, tcp_header_len); 47191da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 47203ab224beSHideo Aoki skb_set_owner_r(skb, sk); 47211da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 47221da177e4SLinus Torvalds } 47231da177e4SLinus Torvalds 47249e412ba7SIlpo Järvinen tcp_event_data_recv(sk, skb); 47251da177e4SLinus Torvalds 47261da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 47271da177e4SLinus Torvalds /* Well, only one small jumplet in fast path... */ 47281da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_DATA); 47299e412ba7SIlpo Järvinen tcp_data_snd_check(sk); 4730463c84b9SArnaldo Carvalho de Melo if (!inet_csk_ack_scheduled(sk)) 47311da177e4SLinus Torvalds goto no_ack; 47321da177e4SLinus Torvalds } 47331da177e4SLinus Torvalds 47341da177e4SLinus Torvalds __tcp_ack_snd_check(sk, 0); 47351da177e4SLinus Torvalds no_ack: 47361a2449a8SChris Leech #ifdef CONFIG_NET_DMA 47371a2449a8SChris Leech if (copied_early) 47381a2449a8SChris Leech __skb_queue_tail(&sk->sk_async_wait_queue, skb); 47391a2449a8SChris Leech else 47401a2449a8SChris Leech #endif 47411da177e4SLinus Torvalds if (eaten) 47421da177e4SLinus Torvalds __kfree_skb(skb); 47431da177e4SLinus Torvalds else 47441da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 47451da177e4SLinus Torvalds return 0; 47461da177e4SLinus Torvalds } 47471da177e4SLinus Torvalds } 47481da177e4SLinus Torvalds 47491da177e4SLinus Torvalds slow_path: 47501da177e4SLinus Torvalds if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 47511da177e4SLinus Torvalds goto csum_error; 47521da177e4SLinus Torvalds 47531da177e4SLinus Torvalds /* 47541da177e4SLinus Torvalds * RFC1323: H1. Apply PAWS check first. 47551da177e4SLinus Torvalds */ 47561da177e4SLinus Torvalds if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4757463c84b9SArnaldo Carvalho de Melo tcp_paws_discard(sk, skb)) { 47581da177e4SLinus Torvalds if (!th->rst) { 47591da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 47601da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 47611da177e4SLinus Torvalds goto discard; 47621da177e4SLinus Torvalds } 47631da177e4SLinus Torvalds /* Resets are accepted even if PAWS failed. 47641da177e4SLinus Torvalds 47651da177e4SLinus Torvalds ts_recent update must be made after we are sure 47661da177e4SLinus Torvalds that the packet is in window. 47671da177e4SLinus Torvalds */ 47681da177e4SLinus Torvalds } 47691da177e4SLinus Torvalds 47701da177e4SLinus Torvalds /* 47711da177e4SLinus Torvalds * Standard slow path. 
47721da177e4SLinus Torvalds */ 47731da177e4SLinus Torvalds 47741da177e4SLinus Torvalds if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 47751da177e4SLinus Torvalds /* RFC793, page 37: "In all states except SYN-SENT, all reset 47761da177e4SLinus Torvalds * (RST) segments are validated by checking their SEQ-fields." 47771da177e4SLinus Torvalds * And page 69: "If an incoming segment is not acceptable, 47781da177e4SLinus Torvalds * an acknowledgment should be sent in reply (unless the RST bit 47791da177e4SLinus Torvalds * is set, if so drop the segment and return)". 47801da177e4SLinus Torvalds */ 47811da177e4SLinus Torvalds if (!th->rst) 47821da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 47831da177e4SLinus Torvalds goto discard; 47841da177e4SLinus Torvalds } 47851da177e4SLinus Torvalds 47861da177e4SLinus Torvalds if (th->rst) { 47871da177e4SLinus Torvalds tcp_reset(sk); 47881da177e4SLinus Torvalds goto discard; 47891da177e4SLinus Torvalds } 47901da177e4SLinus Torvalds 47911da177e4SLinus Torvalds tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 47921da177e4SLinus Torvalds 47931da177e4SLinus Torvalds if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 47941da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 47951da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 47961da177e4SLinus Torvalds tcp_reset(sk); 47971da177e4SLinus Torvalds return 1; 47981da177e4SLinus Torvalds } 47991da177e4SLinus Torvalds 48001da177e4SLinus Torvalds step5: 48011da177e4SLinus Torvalds if (th->ack) 48021da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_SLOWPATH); 48031da177e4SLinus Torvalds 4804463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 48051da177e4SLinus Torvalds 48061da177e4SLinus Torvalds /* Process urgent data. 
*/ 48071da177e4SLinus Torvalds tcp_urg(sk, skb, th); 48081da177e4SLinus Torvalds 48091da177e4SLinus Torvalds /* step 7: process the segment text */ 48101da177e4SLinus Torvalds tcp_data_queue(sk, skb); 48111da177e4SLinus Torvalds 48129e412ba7SIlpo Järvinen tcp_data_snd_check(sk); 48131da177e4SLinus Torvalds tcp_ack_snd_check(sk); 48141da177e4SLinus Torvalds return 0; 48151da177e4SLinus Torvalds 48161da177e4SLinus Torvalds csum_error: 48171da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 48181da177e4SLinus Torvalds 48191da177e4SLinus Torvalds discard: 48201da177e4SLinus Torvalds __kfree_skb(skb); 48211da177e4SLinus Torvalds return 0; 48221da177e4SLinus Torvalds } 48231da177e4SLinus Torvalds 48241da177e4SLinus Torvalds static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 48251da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 48261da177e4SLinus Torvalds { 48271da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 4828d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 48291da177e4SLinus Torvalds int saved_clamp = tp->rx_opt.mss_clamp; 48301da177e4SLinus Torvalds 48311da177e4SLinus Torvalds tcp_parse_options(skb, &tp->rx_opt, 0); 48321da177e4SLinus Torvalds 48331da177e4SLinus Torvalds if (th->ack) { 48341da177e4SLinus Torvalds /* rfc793: 48351da177e4SLinus Torvalds * "If the state is SYN-SENT then 48361da177e4SLinus Torvalds * first check the ACK bit 48371da177e4SLinus Torvalds * If the ACK bit is set 48381da177e4SLinus Torvalds * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 48391da177e4SLinus Torvalds * a reset (unless the RST bit is set, if so drop 48401da177e4SLinus Torvalds * the segment and return)" 48411da177e4SLinus Torvalds * 48421da177e4SLinus Torvalds * We do not send data with SYN, so that RFC-correct 48431da177e4SLinus Torvalds * test reduces to: 48441da177e4SLinus Torvalds */ 48451da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) 48461da177e4SLinus Torvalds goto reset_and_undo; 48471da177e4SLinus Torvalds 48481da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 48491da177e4SLinus Torvalds !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 48501da177e4SLinus Torvalds tcp_time_stamp)) { 48511da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED); 48521da177e4SLinus Torvalds goto reset_and_undo; 48531da177e4SLinus Torvalds } 48541da177e4SLinus Torvalds 48551da177e4SLinus Torvalds /* Now ACK is acceptable. 48561da177e4SLinus Torvalds * 48571da177e4SLinus Torvalds * "If the RST bit is set 48581da177e4SLinus Torvalds * If the ACK was acceptable then signal the user "error: 48591da177e4SLinus Torvalds * connection reset", drop the segment, enter CLOSED state, 48601da177e4SLinus Torvalds * delete TCB, and return." 48611da177e4SLinus Torvalds */ 48621da177e4SLinus Torvalds 48631da177e4SLinus Torvalds if (th->rst) { 48641da177e4SLinus Torvalds tcp_reset(sk); 48651da177e4SLinus Torvalds goto discard; 48661da177e4SLinus Torvalds } 48671da177e4SLinus Torvalds 48681da177e4SLinus Torvalds /* rfc793: 48691da177e4SLinus Torvalds * "fifth, if neither of the SYN or RST bits is set then 48701da177e4SLinus Torvalds * drop the segment and return." 48711da177e4SLinus Torvalds * 48721da177e4SLinus Torvalds * See note below! 
48731da177e4SLinus Torvalds * --ANK(990513) 48741da177e4SLinus Torvalds */ 48751da177e4SLinus Torvalds if (!th->syn) 48761da177e4SLinus Torvalds goto discard_and_undo; 48771da177e4SLinus Torvalds 48781da177e4SLinus Torvalds /* rfc793: 48791da177e4SLinus Torvalds * "If the SYN bit is on ... 48801da177e4SLinus Torvalds * are acceptable then ... 48811da177e4SLinus Torvalds * (our SYN has been ACKed), change the connection 48821da177e4SLinus Torvalds * state to ESTABLISHED..." 48831da177e4SLinus Torvalds */ 48841da177e4SLinus Torvalds 48851da177e4SLinus Torvalds TCP_ECN_rcv_synack(tp, th); 48861da177e4SLinus Torvalds 48871da177e4SLinus Torvalds tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 48881da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_SLOWPATH); 48891da177e4SLinus Torvalds 48901da177e4SLinus Torvalds /* Ok.. it's good. Set up sequence numbers and 48911da177e4SLinus Torvalds * move to established. 48921da177e4SLinus Torvalds */ 48931da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 48941da177e4SLinus Torvalds tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 48951da177e4SLinus Torvalds 48961da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is 48971da177e4SLinus Torvalds * never scaled. 48981da177e4SLinus Torvalds */ 48991da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window); 49001da177e4SLinus Torvalds tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq); 49011da177e4SLinus Torvalds 49021da177e4SLinus Torvalds if (!tp->rx_opt.wscale_ok) { 49031da177e4SLinus Torvalds tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 49041da177e4SLinus Torvalds tp->window_clamp = min(tp->window_clamp, 65535U); 49051da177e4SLinus Torvalds } 49061da177e4SLinus Torvalds 49071da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp) { 49081da177e4SLinus Torvalds tp->rx_opt.tstamp_ok = 1; 49091da177e4SLinus Torvalds tp->tcp_header_len = 49101da177e4SLinus Torvalds sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 49111da177e4SLinus Torvalds tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 49121da177e4SLinus Torvalds tcp_store_ts_recent(tp); 49131da177e4SLinus Torvalds } else { 49141da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr); 49151da177e4SLinus Torvalds } 49161da177e4SLinus Torvalds 4917e60402d0SIlpo Järvinen if (tcp_is_sack(tp) && sysctl_tcp_fack) 4918e60402d0SIlpo Järvinen tcp_enable_fack(tp); 49191da177e4SLinus Torvalds 49205d424d5aSJohn Heffner tcp_mtup_init(sk); 4921d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 49221da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 49231da177e4SLinus Torvalds 49241da177e4SLinus Torvalds /* Remember, tcp_poll() does not lock socket! 49251da177e4SLinus Torvalds * Change state from SYN-SENT only after copied_seq 49261da177e4SLinus Torvalds * is initialized. */ 49271da177e4SLinus Torvalds tp->copied_seq = tp->rcv_nxt; 4928e16aa207SRalf Baechle smp_mb(); 49291da177e4SLinus Torvalds tcp_set_state(sk, TCP_ESTABLISHED); 49301da177e4SLinus Torvalds 49316b877699SVenkat Yekkirala security_inet_conn_established(sk, skb); 49326b877699SVenkat Yekkirala 49331da177e4SLinus Torvalds /* Make sure socket is routed, for correct metrics. */ 49348292a17aSArnaldo Carvalho de Melo icsk->icsk_af_ops->rebuild_header(sk); 49351da177e4SLinus Torvalds 49361da177e4SLinus Torvalds tcp_init_metrics(sk); 49371da177e4SLinus Torvalds 49386687e988SArnaldo Carvalho de Melo tcp_init_congestion_control(sk); 4939317a76f9SStephen Hemminger 49401da177e4SLinus Torvalds /* Prevent spurious tcp_cwnd_restart() on first data 49411da177e4SLinus Torvalds * packet. 
49421da177e4SLinus Torvalds */ 49431da177e4SLinus Torvalds tp->lsndtime = tcp_time_stamp; 49441da177e4SLinus Torvalds 49451da177e4SLinus Torvalds tcp_init_buffer_space(sk); 49461da177e4SLinus Torvalds 49471da177e4SLinus Torvalds if (sock_flag(sk, SOCK_KEEPOPEN)) 4948463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 49491da177e4SLinus Torvalds 49501da177e4SLinus Torvalds if (!tp->rx_opt.snd_wscale) 49511da177e4SLinus Torvalds __tcp_fast_path_on(tp, tp->snd_wnd); 49521da177e4SLinus Torvalds else 49531da177e4SLinus Torvalds tp->pred_flags = 0; 49541da177e4SLinus Torvalds 49551da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) { 49561da177e4SLinus Torvalds sk->sk_state_change(sk); 49578d8ad9d7SPavel Emelyanov sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 49581da177e4SLinus Torvalds } 49591da177e4SLinus Torvalds 4960295f7324SArnaldo Carvalho de Melo if (sk->sk_write_pending || 4961295f7324SArnaldo Carvalho de Melo icsk->icsk_accept_queue.rskq_defer_accept || 4962295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.pingpong) { 49631da177e4SLinus Torvalds /* Save one ACK. Data will be ready after 49641da177e4SLinus Torvalds * several ticks, if write_pending is set. 49651da177e4SLinus Torvalds * 49661da177e4SLinus Torvalds * It may be deleted, but with this feature tcpdumps 49671da177e4SLinus Torvalds * look so _wonderfully_ clever, that I was not able 49681da177e4SLinus Torvalds * to stand against the temptation 8) --ANK 49691da177e4SLinus Torvalds */ 4970463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 4971295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.lrcvtime = tcp_time_stamp; 4972295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.ato = TCP_ATO_MIN; 4973463c84b9SArnaldo Carvalho de Melo tcp_incr_quickack(sk); 4974463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 49753f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 49763f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX); 49771da177e4SLinus Torvalds 49781da177e4SLinus Torvalds discard: 49791da177e4SLinus Torvalds __kfree_skb(skb); 49801da177e4SLinus Torvalds return 0; 49811da177e4SLinus Torvalds } else { 49821da177e4SLinus Torvalds tcp_send_ack(sk); 49831da177e4SLinus Torvalds } 49841da177e4SLinus Torvalds return -1; 49851da177e4SLinus Torvalds } 49861da177e4SLinus Torvalds 49871da177e4SLinus Torvalds /* No ACK in the segment */ 49881da177e4SLinus Torvalds 49891da177e4SLinus Torvalds if (th->rst) { 49901da177e4SLinus Torvalds /* rfc793: 49911da177e4SLinus Torvalds * "If the RST bit is set 49921da177e4SLinus Torvalds * 49931da177e4SLinus Torvalds * Otherwise (no ACK) drop the segment and return." 49941da177e4SLinus Torvalds */ 49951da177e4SLinus Torvalds 49961da177e4SLinus Torvalds goto discard_and_undo; 49971da177e4SLinus Torvalds } 49981da177e4SLinus Torvalds 49991da177e4SLinus Torvalds /* PAWS check. */ 5000056834d9SIlpo Järvinen if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 5001056834d9SIlpo Järvinen tcp_paws_check(&tp->rx_opt, 0)) 50021da177e4SLinus Torvalds goto discard_and_undo; 50031da177e4SLinus Torvalds 50041da177e4SLinus Torvalds if (th->syn) { 50051da177e4SLinus Torvalds /* We see SYN without ACK. It is attempt of 50061da177e4SLinus Torvalds * simultaneous connect with crossed SYNs. 50071da177e4SLinus Torvalds * Particularly, it can be connect to self. 
50081da177e4SLinus Torvalds */ 50091da177e4SLinus Torvalds tcp_set_state(sk, TCP_SYN_RECV); 50101da177e4SLinus Torvalds 50111da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp) { 50121da177e4SLinus Torvalds tp->rx_opt.tstamp_ok = 1; 50131da177e4SLinus Torvalds tcp_store_ts_recent(tp); 50141da177e4SLinus Torvalds tp->tcp_header_len = 50151da177e4SLinus Torvalds sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 50161da177e4SLinus Torvalds } else { 50171da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr); 50181da177e4SLinus Torvalds } 50191da177e4SLinus Torvalds 50201da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 50211da177e4SLinus Torvalds tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 50221da177e4SLinus Torvalds 50231da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is 50241da177e4SLinus Torvalds * never scaled. 50251da177e4SLinus Torvalds */ 50261da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window); 50271da177e4SLinus Torvalds tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 50281da177e4SLinus Torvalds tp->max_window = tp->snd_wnd; 50291da177e4SLinus Torvalds 50301da177e4SLinus Torvalds TCP_ECN_rcv_syn(tp, th); 50311da177e4SLinus Torvalds 50325d424d5aSJohn Heffner tcp_mtup_init(sk); 5033d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 50341da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 50351da177e4SLinus Torvalds 50361da177e4SLinus Torvalds tcp_send_synack(sk); 50371da177e4SLinus Torvalds #if 0 50381da177e4SLinus Torvalds /* Note, we could accept data and URG from this segment. 50391da177e4SLinus Torvalds * There are no obstacles to make this. 50401da177e4SLinus Torvalds * 50411da177e4SLinus Torvalds * However, if we ignore data in ACKless segments sometimes, 50421da177e4SLinus Torvalds * we have no reasons to accept it sometimes. 50431da177e4SLinus Torvalds * Also, seems the code doing it in step6 of tcp_rcv_state_process 50441da177e4SLinus Torvalds * is not flawless. So, discard packet for sanity. 50451da177e4SLinus Torvalds * Uncomment this return to process the data. 50461da177e4SLinus Torvalds */ 50471da177e4SLinus Torvalds return -1; 50481da177e4SLinus Torvalds #else 50491da177e4SLinus Torvalds goto discard; 50501da177e4SLinus Torvalds #endif 50511da177e4SLinus Torvalds } 50521da177e4SLinus Torvalds /* "fifth, if neither of the SYN or RST bits is set then 50531da177e4SLinus Torvalds * drop the segment and return." 50541da177e4SLinus Torvalds */ 50551da177e4SLinus Torvalds 50561da177e4SLinus Torvalds discard_and_undo: 50571da177e4SLinus Torvalds tcp_clear_options(&tp->rx_opt); 50581da177e4SLinus Torvalds tp->rx_opt.mss_clamp = saved_clamp; 50591da177e4SLinus Torvalds goto discard; 50601da177e4SLinus Torvalds 50611da177e4SLinus Torvalds reset_and_undo: 50621da177e4SLinus Torvalds tcp_clear_options(&tp->rx_opt); 50631da177e4SLinus Torvalds tp->rx_opt.mss_clamp = saved_clamp; 50641da177e4SLinus Torvalds return 1; 50651da177e4SLinus Torvalds } 50661da177e4SLinus Torvalds 50671da177e4SLinus Torvalds /* 50681da177e4SLinus Torvalds * This function implements the receiving procedure of RFC 793 for 50691da177e4SLinus Torvalds * all states except ESTABLISHED and TIME_WAIT. 50701da177e4SLinus Torvalds * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 50711da177e4SLinus Torvalds * address independent. 
50721da177e4SLinus Torvalds */ 50731da177e4SLinus Torvalds 50741da177e4SLinus Torvalds int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 50751da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 50761da177e4SLinus Torvalds { 50771da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 50788292a17aSArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 50791da177e4SLinus Torvalds int queued = 0; 50801da177e4SLinus Torvalds 50811da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 50821da177e4SLinus Torvalds 50831da177e4SLinus Torvalds switch (sk->sk_state) { 50841da177e4SLinus Torvalds case TCP_CLOSE: 50851da177e4SLinus Torvalds goto discard; 50861da177e4SLinus Torvalds 50871da177e4SLinus Torvalds case TCP_LISTEN: 50881da177e4SLinus Torvalds if (th->ack) 50891da177e4SLinus Torvalds return 1; 50901da177e4SLinus Torvalds 50911da177e4SLinus Torvalds if (th->rst) 50921da177e4SLinus Torvalds goto discard; 50931da177e4SLinus Torvalds 50941da177e4SLinus Torvalds if (th->syn) { 50958292a17aSArnaldo Carvalho de Melo if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 50961da177e4SLinus Torvalds return 1; 50971da177e4SLinus Torvalds 50981da177e4SLinus Torvalds /* Now we have several options: In theory there is 50991da177e4SLinus Torvalds * nothing else in the frame. KA9Q has an option to 51001da177e4SLinus Torvalds * send data with the syn, BSD accepts data with the 51011da177e4SLinus Torvalds * syn up to the [to be] advertised window and 51021da177e4SLinus Torvalds * Solaris 2.1 gives you a protocol error. For now 51031da177e4SLinus Torvalds * we just ignore it, that fits the spec precisely 51041da177e4SLinus Torvalds * and avoids incompatibilities. It would be nice in 51051da177e4SLinus Torvalds * future to drop through and process the data. 51061da177e4SLinus Torvalds * 51071da177e4SLinus Torvalds * Now that TTCP is starting to be used we ought to 51081da177e4SLinus Torvalds * queue this data. 51091da177e4SLinus Torvalds * But, this leaves one open to an easy denial of 51101da177e4SLinus Torvalds * service attack, and SYN cookies can't defend 51111da177e4SLinus Torvalds * against this problem. So, we drop the data 5112fb7e2399SMasayuki Nakagawa * in the interest of security over speed unless 5113fb7e2399SMasayuki Nakagawa * it's still in use. 51141da177e4SLinus Torvalds */ 5115fb7e2399SMasayuki Nakagawa kfree_skb(skb); 5116fb7e2399SMasayuki Nakagawa return 0; 51171da177e4SLinus Torvalds } 51181da177e4SLinus Torvalds goto discard; 51191da177e4SLinus Torvalds 51201da177e4SLinus Torvalds case TCP_SYN_SENT: 51211da177e4SLinus Torvalds queued = tcp_rcv_synsent_state_process(sk, skb, th, len); 51221da177e4SLinus Torvalds if (queued >= 0) 51231da177e4SLinus Torvalds return queued; 51241da177e4SLinus Torvalds 51251da177e4SLinus Torvalds /* Do step6 onward by hand. 
*/ 51261da177e4SLinus Torvalds tcp_urg(sk, skb, th); 51271da177e4SLinus Torvalds __kfree_skb(skb); 51289e412ba7SIlpo Järvinen tcp_data_snd_check(sk); 51291da177e4SLinus Torvalds return 0; 51301da177e4SLinus Torvalds } 51311da177e4SLinus Torvalds 51321da177e4SLinus Torvalds if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5133463c84b9SArnaldo Carvalho de Melo tcp_paws_discard(sk, skb)) { 51341da177e4SLinus Torvalds if (!th->rst) { 51351da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 51361da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 51371da177e4SLinus Torvalds goto discard; 51381da177e4SLinus Torvalds } 51391da177e4SLinus Torvalds /* Reset is accepted even if it did not pass PAWS. */ 51401da177e4SLinus Torvalds } 51411da177e4SLinus Torvalds 51421da177e4SLinus Torvalds /* step 1: check sequence number */ 51431da177e4SLinus Torvalds if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 51441da177e4SLinus Torvalds if (!th->rst) 51451da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 51461da177e4SLinus Torvalds goto discard; 51471da177e4SLinus Torvalds } 51481da177e4SLinus Torvalds 51491da177e4SLinus Torvalds /* step 2: check RST bit */ 51501da177e4SLinus Torvalds if (th->rst) { 51511da177e4SLinus Torvalds tcp_reset(sk); 51521da177e4SLinus Torvalds goto discard; 51531da177e4SLinus Torvalds } 51541da177e4SLinus Torvalds 51551da177e4SLinus Torvalds tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 51561da177e4SLinus Torvalds 51571da177e4SLinus Torvalds /* step 3: check security and precedence [ignored] */ 51581da177e4SLinus Torvalds 51591da177e4SLinus Torvalds /* step 4: 51601da177e4SLinus Torvalds * 51611da177e4SLinus Torvalds * Check for a SYN in window. 51621da177e4SLinus Torvalds */ 51631da177e4SLinus Torvalds if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 51641da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 51651da177e4SLinus Torvalds tcp_reset(sk); 51661da177e4SLinus Torvalds return 1; 51671da177e4SLinus Torvalds } 51681da177e4SLinus Torvalds 51691da177e4SLinus Torvalds /* step 5: check the ACK field */ 51701da177e4SLinus Torvalds if (th->ack) { 51711da177e4SLinus Torvalds int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH); 51721da177e4SLinus Torvalds 51731da177e4SLinus Torvalds switch (sk->sk_state) { 51741da177e4SLinus Torvalds case TCP_SYN_RECV: 51751da177e4SLinus Torvalds if (acceptable) { 51761da177e4SLinus Torvalds tp->copied_seq = tp->rcv_nxt; 5177e16aa207SRalf Baechle smp_mb(); 51781da177e4SLinus Torvalds tcp_set_state(sk, TCP_ESTABLISHED); 51791da177e4SLinus Torvalds sk->sk_state_change(sk); 51801da177e4SLinus Torvalds 51811da177e4SLinus Torvalds /* Note, that this wakeup is only for marginal 51821da177e4SLinus Torvalds * crossed SYN case. Passively open sockets 51831da177e4SLinus Torvalds * are not waked up, because sk->sk_sleep == 51841da177e4SLinus Torvalds * NULL and sk->sk_socket == NULL. 
51851da177e4SLinus Torvalds */ 51868d8ad9d7SPavel Emelyanov if (sk->sk_socket) 51878d8ad9d7SPavel Emelyanov sk_wake_async(sk, 51888d8ad9d7SPavel Emelyanov SOCK_WAKE_IO, POLL_OUT); 51891da177e4SLinus Torvalds 51901da177e4SLinus Torvalds tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 51911da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window) << 51921da177e4SLinus Torvalds tp->rx_opt.snd_wscale; 51931da177e4SLinus Torvalds tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, 51941da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq); 51951da177e4SLinus Torvalds 51961da177e4SLinus Torvalds /* tcp_ack considers this ACK as duplicate 51971da177e4SLinus Torvalds * and does not calculate rtt. 51981da177e4SLinus Torvalds * Fix it at least with timestamps. 51991da177e4SLinus Torvalds */ 5200056834d9SIlpo Järvinen if (tp->rx_opt.saw_tstamp && 5201056834d9SIlpo Järvinen tp->rx_opt.rcv_tsecr && !tp->srtt) 52022d2abbabSStephen Hemminger tcp_ack_saw_tstamp(sk, 0); 52031da177e4SLinus Torvalds 52041da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 52051da177e4SLinus Torvalds tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 52061da177e4SLinus Torvalds 52071da177e4SLinus Torvalds /* Make sure socket is routed, for 52081da177e4SLinus Torvalds * correct metrics. 52091da177e4SLinus Torvalds */ 52108292a17aSArnaldo Carvalho de Melo icsk->icsk_af_ops->rebuild_header(sk); 52111da177e4SLinus Torvalds 52121da177e4SLinus Torvalds tcp_init_metrics(sk); 52131da177e4SLinus Torvalds 52146687e988SArnaldo Carvalho de Melo tcp_init_congestion_control(sk); 5215317a76f9SStephen Hemminger 52161da177e4SLinus Torvalds /* Prevent spurious tcp_cwnd_restart() on 52171da177e4SLinus Torvalds * first data packet. 52181da177e4SLinus Torvalds */ 52191da177e4SLinus Torvalds tp->lsndtime = tcp_time_stamp; 52201da177e4SLinus Torvalds 52215d424d5aSJohn Heffner tcp_mtup_init(sk); 52221da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 52231da177e4SLinus Torvalds tcp_init_buffer_space(sk); 52241da177e4SLinus Torvalds tcp_fast_path_on(tp); 52251da177e4SLinus Torvalds } else { 52261da177e4SLinus Torvalds return 1; 52271da177e4SLinus Torvalds } 52281da177e4SLinus Torvalds break; 52291da177e4SLinus Torvalds 52301da177e4SLinus Torvalds case TCP_FIN_WAIT1: 52311da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 52321da177e4SLinus Torvalds tcp_set_state(sk, TCP_FIN_WAIT2); 52331da177e4SLinus Torvalds sk->sk_shutdown |= SEND_SHUTDOWN; 52341da177e4SLinus Torvalds dst_confirm(sk->sk_dst_cache); 52351da177e4SLinus Torvalds 52361da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 52371da177e4SLinus Torvalds /* Wake up lingering close() */ 52381da177e4SLinus Torvalds sk->sk_state_change(sk); 52391da177e4SLinus Torvalds else { 52401da177e4SLinus Torvalds int tmo; 52411da177e4SLinus Torvalds 52421da177e4SLinus Torvalds if (tp->linger2 < 0 || 52431da177e4SLinus Torvalds (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 52441da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 52451da177e4SLinus Torvalds tcp_done(sk); 52461da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 52471da177e4SLinus Torvalds return 1; 52481da177e4SLinus Torvalds } 52491da177e4SLinus Torvalds 5250463c84b9SArnaldo Carvalho de Melo tmo = tcp_fin_time(sk); 52511da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 5252463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 52531da177e4SLinus Torvalds } else if (th->fin || sock_owned_by_user(sk)) { 52541da177e4SLinus Torvalds /* Bad case. We could lose such FIN otherwise. 
52551da177e4SLinus Torvalds * It is not a big problem, but it looks confusing 52561da177e4SLinus Torvalds * and not so rare event. We still can lose it now, 52571da177e4SLinus Torvalds * if it spins in bh_lock_sock(), but it is really 52581da177e4SLinus Torvalds * marginal case. 52591da177e4SLinus Torvalds */ 5260463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, tmo); 52611da177e4SLinus Torvalds } else { 52621da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 52631da177e4SLinus Torvalds goto discard; 52641da177e4SLinus Torvalds } 52651da177e4SLinus Torvalds } 52661da177e4SLinus Torvalds } 52671da177e4SLinus Torvalds break; 52681da177e4SLinus Torvalds 52691da177e4SLinus Torvalds case TCP_CLOSING: 52701da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 52711da177e4SLinus Torvalds tcp_time_wait(sk, TCP_TIME_WAIT, 0); 52721da177e4SLinus Torvalds goto discard; 52731da177e4SLinus Torvalds } 52741da177e4SLinus Torvalds break; 52751da177e4SLinus Torvalds 52761da177e4SLinus Torvalds case TCP_LAST_ACK: 52771da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 52781da177e4SLinus Torvalds tcp_update_metrics(sk); 52791da177e4SLinus Torvalds tcp_done(sk); 52801da177e4SLinus Torvalds goto discard; 52811da177e4SLinus Torvalds } 52821da177e4SLinus Torvalds break; 52831da177e4SLinus Torvalds } 52841da177e4SLinus Torvalds } else 52851da177e4SLinus Torvalds goto discard; 52861da177e4SLinus Torvalds 52871da177e4SLinus Torvalds /* step 6: check the URG bit */ 52881da177e4SLinus Torvalds tcp_urg(sk, skb, th); 52891da177e4SLinus Torvalds 52901da177e4SLinus Torvalds /* step 7: process the segment text */ 52911da177e4SLinus Torvalds switch (sk->sk_state) { 52921da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 52931da177e4SLinus Torvalds case TCP_CLOSING: 52941da177e4SLinus Torvalds case TCP_LAST_ACK: 52951da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 52961da177e4SLinus Torvalds break; 52971da177e4SLinus Torvalds case TCP_FIN_WAIT1: 52981da177e4SLinus Torvalds case TCP_FIN_WAIT2: 52991da177e4SLinus Torvalds /* RFC 793 says to queue data in these states, 53001da177e4SLinus Torvalds * RFC 1122 says we MUST send a reset. 53011da177e4SLinus Torvalds * BSD 4.4 also does reset. 
53021da177e4SLinus Torvalds */ 53031da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) { 53041da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 53051da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 53061da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 53071da177e4SLinus Torvalds tcp_reset(sk); 53081da177e4SLinus Torvalds return 1; 53091da177e4SLinus Torvalds } 53101da177e4SLinus Torvalds } 53111da177e4SLinus Torvalds /* Fall through */ 53121da177e4SLinus Torvalds case TCP_ESTABLISHED: 53131da177e4SLinus Torvalds tcp_data_queue(sk, skb); 53141da177e4SLinus Torvalds queued = 1; 53151da177e4SLinus Torvalds break; 53161da177e4SLinus Torvalds } 53171da177e4SLinus Torvalds 53181da177e4SLinus Torvalds /* tcp_data could move socket to TIME-WAIT */ 53191da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 53209e412ba7SIlpo Järvinen tcp_data_snd_check(sk); 53211da177e4SLinus Torvalds tcp_ack_snd_check(sk); 53221da177e4SLinus Torvalds } 53231da177e4SLinus Torvalds 53241da177e4SLinus Torvalds if (!queued) { 53251da177e4SLinus Torvalds discard: 53261da177e4SLinus Torvalds __kfree_skb(skb); 53271da177e4SLinus Torvalds } 53281da177e4SLinus Torvalds return 0; 53291da177e4SLinus Torvalds } 53301da177e4SLinus Torvalds 53311da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_ecn); 53321da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_reordering); 53331da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_parse_options); 53341da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_rcv_established); 53351da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_rcv_state_process); 533640efc6faSStephen Hemminger EXPORT_SYMBOL(tcp_initialize_rcv_mss); 5337
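/*
 * Illustrative sketch (not part of the kernel build): the before()/after()
 * helpers used throughout this file compare 32-bit sequence numbers
 * modulo 2^32 by testing the sign of the difference, so comparisons stay
 * correct across a sequence-space wrap.  A minimal standalone
 * reimplementation, assuming the same semantics as the helpers declared
 * in include/net/tcp.h:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t seq1, uint32_t seq2)
{
	/* True if seq1 precedes seq2, even across a 2^32 wrap. */
	return (int32_t)(seq1 - seq2) < 0;
}

static int seq_after(uint32_t seq1, uint32_t seq2)
{
	return seq_before(seq2, seq1);
}

int main(void)
{
	/* 0xfffffff0 wrapped past zero to 0x10: still "before". */
	printf("%d\n", seq_before(0xfffffff0u, 0x00000010u));	/* prints 1 */
	printf("%d\n", seq_after(0x00000010u, 0xfffffff0u));	/* prints 1 */
	return 0;
}
#endif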
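/*
 * Illustrative sketch (not part of the kernel build): tcp_check_urg()
 * above turns the 16-bit urgent offset from the header into an absolute
 * sequence number.  With sysctl_tcp_stdurg == 0 the offset is assumed to
 * follow the BSD convention (it points just past the urgent byte), so it
 * is stepped back by one; with tcp_stdurg set it is used as-is, per the
 * host-requirements correction the comment above refers to.  A
 * hypothetical standalone helper showing the same arithmetic:
 */
#if 0
#include <stdint.h>

/* Returns the sequence number of the urgent byte itself. */
static uint32_t urg_byte_seq(uint32_t seg_seq, uint16_t urg_off, int stdurg)
{
	uint32_t ptr = urg_off;

	if (ptr && !stdurg)
		ptr--;		/* BSD-style pointer: step back to the urgent byte */
	return seg_seq + ptr;
}
#endif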
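/*
 * Illustrative sketch (not part of the kernel build): the header-prediction
 * fast path in tcp_rcv_established() compares one 32-bit word of the
 * incoming header (data offset, flags, window) against tp->pred_flags,
 * which the comment above describes as "0xS?10 << 16 + snd_wnd".  Roughly,
 * the predicted word packs the expected header length, the ACK bit, and
 * the current send window; a simplified host-order reconstruction (the
 * real helper, __tcp_fast_path_on(), lives in include/net/tcp.h and works
 * on the network-byte-order word):
 */
#if 0
#include <stdint.h>

static uint32_t predicted_flag_word(unsigned int header_len_bytes, uint16_t snd_wnd)
{
	uint32_t doff = header_len_bytes >> 2;	/* data offset in 32-bit words ("S") */
	uint32_t ack_bit = 0x10;		/* ACK within the 8-bit flag field */

	/* word layout: doff(4) | reserved(4) | flags(8) | window(16) */
	return (doff << 28) | (ack_bit << 16) | snd_wnd;
}
#endif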
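/*
 * Illustrative sketch (not part of the kernel build): tcp_new_space()
 * above sizes the send buffer as roughly
 *     2 * max(snd_cwnd, reordering + 1) * (one MSS of data + per-skb overhead).
 * A standalone back-of-the-envelope calculation with hypothetical numbers
 * (MSS 1460, 512 bytes of assumed header plus sk_buff overhead, cwnd of
 * 10 segments):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;	/* hypothetical mss_cache */
	unsigned int overhead = 512;	/* hypothetical header + struct sk_buff overhead */
	unsigned int cwnd = 10;		/* hypothetical snd_cwnd */
	unsigned int reordering = 3;	/* TCP_FASTRETRANS_THRESH default */

	unsigned int demanded = cwnd > reordering + 1 ? cwnd : reordering + 1;
	unsigned int sndmem = (mss + overhead) * 2 * demanded;

	printf("suggested sndbuf: %u bytes\n", sndmem);	/* ~39 KB with these numbers */
	return 0;
}
#endif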