/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_input.c,v 1.243 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim	:	ECN support
 *	Andrei Gurtov,
 *	Pasi Sarolahti,
 *	Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *				engine. Lots of bugs are found.
 *	Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn __read_mostly;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
#define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk,
                                const struct sk_buff *skb)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const unsigned int lss = icsk->icsk_ack.last_seg_size;
        unsigned int len;

        icsk->icsk_ack.last_seg_size = 0;

        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
        len = skb_shinfo(skb)->gso_size ?: skb->len;
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = len;
        } else {
                /* Otherwise, we make more careful check taking into account,
                 * that SACKs block is variable.
                 *
                 * "len" is invariant segment length, including TCP header.
                 */
                len += skb->data - skb->h.raw;
                if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
                     * This observation (if it is correct 8)) allows
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
                     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
                        /* Subtract also invariant (if peer is RFC compliant),
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
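                         * (Note: the candidate is only cached here in
                         * last_seg_size; rcv_mss is updated below when two
                         * consecutive segments yield the same value, i.e.
                         * len == lss, so one odd-sized segment cannot
                         * shrink the estimate.)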
                         */
                        len -= tcp_sk(sk)->tcp_header_len;
                        icsk->icsk_ack.last_seg_size = len;
                        if (len == lss) {
                                icsk->icsk_ack.rcv_mss = len;
                                return;
                        }
                }
                if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
                        icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        }
}

static void tcp_incr_quickack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

        if (quickacks==0)
                quickacks=2;
        if (quickacks > icsk->icsk_ack.quick)
                icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        tcp_incr_quickack(sk);
        icsk->icsk_ack.pingpong = 0;
        icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
        int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
                     sizeof(struct sk_buff);

        if (sk->sk_sndbuf < 3 * sndmem)
                sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
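 * (How the split is sized: with the default sysctl_tcp_adv_win_scale of 2
 * set above, tcp_win_from_space() advertises space - space/4, i.e. roughly
 * one quarter of the socket buffer is held back as application buffer.)
 *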
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The less window_clamp is
 * the smoother our behaviour from viewpoint of network, but the lower
 * throughput and the higher sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is more strict window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
                             const struct sk_buff *skb)
{
        /* Optimize this! */
        int truesize = tcp_win_from_space(skb->truesize)/2;
        int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;

        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
                        return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

                truesize >>= 1;
                window >>= 1;
        }
        return 0;
}

static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
                            struct sk_buff *skb)
{
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
            !tcp_memory_pressure) {
                int incr;

                /* Check #2. Increase window, if skb with such overhead
                 * will fit to rcvbuf in future.
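                 * (A cheap test is tried first below: if the advertised
                 * share of skb->truesize does not exceed the payload length,
                 * per-skb overhead is modest and we may grow by a full
                 * 2*advmss; otherwise the slower __tcp_grow_window() scan
                 * above decides.)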
                 */
                if (tcp_win_from_space(skb->truesize) <= skb->len)
                        incr = 2*tp->advmss;
                else
                        incr = __tcp_grow_window(sk, tp, skb);

                if (incr) {
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        }
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);

        /* Try to select rcvbuf so that 4 mss-sized segments
         * will fit to window and corresponding skbs will fit to our rcvbuf.
         * (was 3; 4 is minimum to allow fast retransmit to work.)
         */
        while (tcp_win_from_space(rcvmem) < tp->advmss)
                rcvmem += 128;
        if (sk->sk_rcvbuf < 4 * rcvmem)
                sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;

        if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                tcp_fixup_rcvbuf(sk);
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
                tcp_fixup_sndbuf(sk);

        tp->rcvq_space.space = tp->rcv_wnd;

        maxwin = tcp_full_space(sk);

        if (tp->window_clamp >= maxwin) {
                tp->window_clamp = maxwin;

                if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
                        tp->window_clamp = max(maxwin -
                                               (maxwin >> sysctl_tcp_app_win),
                                               4 * tp->advmss);
        }

        /* Force reservation of one segment. */
        if (sysctl_tcp_app_win &&
            tp->window_clamp > 2 * tp->advmss &&
            tp->window_clamp + tp->advmss > maxwin)
                tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

        tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_ack.quick = 0;

        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_memory_pressure &&
            atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
}


/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about MSS used by the peer.
 * We haven't any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACKing less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

        hint = min(hint, tp->rcv_wnd/2);
        hint = min(hint, TCP_MIN_RCVMSS);
        hint = max(hint, TCP_MIN_MSS);

        inet_csk(sk)->icsk_ack.rcv_mss = hint;
}

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps>
 *
 * More detail on this code can be found at
 * <http://www.psc.edu/~jheffner/senior_thesis.ps>,
 * though this reference is out of date.  A new paper
 * is pending.
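 *
 * Rough idea, as implemented below: estimate the round trip time at the
 * receiver from how long one full advertised window of sequence space
 * takes to arrive (no timestamps needed), then size the receive buffer
 * so it can hold about twice the data the sender can put in flight in
 * one such RTT.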
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
        u32 new_sample = tp->rcv_rtt_est.rtt;
        long m = sample;

        if (m == 0)
                m = 1;

        if (new_sample != 0) {
                /* If we sample in larger samples in the non-timestamp
                 * case, we could grossly overestimate the RTT especially
                 * with chatty applications or bulk transfer apps which
                 * are stalled on filesystem I/O.
                 *
                 * Also, since we are only going for a minimum in the
                 * non-timestamp case, we do not smooth things out
                 * else with timestamps disabled convergence takes too
                 * long.
                 */
                if (!win_dep) {
                        m -= (new_sample >> 3);
                        new_sample += m;
                } else if (m < new_sample)
                        new_sample = m << 3;
        } else {
                /* No previous measure. */
                new_sample = m << 3;
        }

        if (tp->rcv_rtt_est.rtt != new_sample)
                tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
        if (tp->rcv_rtt_est.time == 0)
                goto new_measure;
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
        tcp_rcv_rtt_update(tp,
                           jiffies - tp->rcv_rtt_est.time,
                           1);

new_measure:
        tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
        tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        if (tp->rx_opt.rcv_tsecr &&
            (TCP_SKB_CB(skb)->end_seq -
             TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
                tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
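 * The measurement interval is one receiver-side RTT estimate; the bytes
 * copied to the application in that interval are doubled (head room for
 * the sender's congestion window to keep growing) and, if
 * tcp_moderate_rcvbuf allows it, sk_rcvbuf and window_clamp are raised
 * toward that target, capped by sysctl_tcp_rmem[2].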
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
        int space;

        if (tp->rcvq_space.time == 0)
                goto new_measure;

        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) ||
            tp->rcv_rtt_est.rtt == 0)
                return;

        space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

        space = max(tp->rcvq_space.space, space);

        if (tp->rcvq_space.space != space) {
                int rcvmem;

                tp->rcvq_space.space = space;

                if (sysctl_tcp_moderate_rcvbuf &&
                    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                        int new_clamp = space;

                        /* Receive space grows, normalize in order to
                         * take into account packet headers and sk_buff
                         * structure overhead.
                         */
                        space /= tp->advmss;
                        if (!space)
                                space = 1;
                        rcvmem = (tp->advmss + MAX_TCP_HEADER +
                                  16 + sizeof(struct sk_buff));
                        while (tcp_win_from_space(rcvmem) < tp->advmss)
                                rcvmem += 128;
                        space *= rcvmem;
                        space = min(space, sysctl_tcp_rmem[2]);
                        if (space > sk->sk_rcvbuf) {
                                sk->sk_rcvbuf = space;

                                /* Make the window clamp follow along. */
                                tp->window_clamp = new_clamp;
                        }
                }
        }

new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
        tp->rcvq_space.time = tcp_time_stamp;
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;

        inet_csk_schedule_ack(sk);

        tcp_measure_rcv_mss(sk, skb);

        tcp_rcv_rtt_measure(tp);

        now = tcp_time_stamp;

        if (!icsk->icsk_ack.ato) {
                /* The _first_ data packet received, initialize
                 * delayed ACK engine.
                 */
                tcp_incr_quickack(sk);
                icsk->icsk_ack.ato = TCP_ATO_MIN;
        } else {
                int m = now - icsk->icsk_ack.lrcvtime;

                if (m <= TCP_ATO_MIN/2) {
                        /* The fastest case is the first. */
                        icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
                } else if (m < icsk->icsk_ack.ato) {
                        icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
                        if (icsk->icsk_ack.ato > icsk->icsk_rto)
                                icsk->icsk_ack.ato = icsk->icsk_rto;
                } else if (m > icsk->icsk_rto) {
                        /* Too long gap. Apparently sender failed to
                         * restart window, so that we send ACKs quickly.
                         */
                        tcp_incr_quickack(sk);
                        sk_stream_mem_reclaim(sk);
                }
        }
        icsk->icsk_ack.lrcvtime = now;

        TCP_ECN_check_ce(tp, skb);

        if (skb->len >= 128)
                tcp_grow_window(sk, tp, skb);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
        struct tcp_sock *tp = tcp_sk(sk);
        long m = mrtt; /* RTT */

        /* The following amusing code comes from Jacobson's
         * article in SIGCOMM '88.  Note that rtt and mdev
         * are scaled versions of rtt and mean deviation.
         * This is designed to be as fast as possible
         * m stands for "measurement".
         *
         * On a 1990 paper the rto value is changed to:
         * RTO = rtt + 4 * mdev
         *
         * Funny. This algorithm seems to be very broken.
         * These formulae increase RTO, when it should be decreased, increase
         * too slowly, when it should be increased quickly, decrease too quickly
         * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
         * does not matter how to _calculate_ it. Seems, it was trap
         * that VJ failed to avoid. 8)
         */
        if(m == 0)
                m = 1;
        if (tp->srtt != 0) {
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
                tp->srtt += m;          /* rtt = 7/8 rtt + 1/8 new */
                if (m < 0) {
                        m = -m;         /* m is now abs(error) */
                        m -= (tp->mdev >> 2);   /* similar update on mdev */
                        /* This is similar to one of Eifel findings.
                         * Eifel blocks mdev updates when rtt decreases.
                         * This solution is a bit different: we use finer gain
                         * for mdev in this case (alpha*beta).
                         * Like Eifel it also prevents growth of rto,
                         * but also it limits too fast rto decreases,
                         * happening in pure Eifel.
                         */
                        if (m > 0)
                                m >>= 3;
                } else {
                        m -= (tp->mdev >> 2);   /* similar update on mdev */
                }
                tp->mdev += m;          /* mdev = 3/4 mdev + 1/4 new */
                if (tp->mdev > tp->mdev_max) {
                        tp->mdev_max = tp->mdev;
                        if (tp->mdev_max > tp->rttvar)
                                tp->rttvar = tp->mdev_max;
                }
                if (after(tp->snd_una, tp->rtt_seq)) {
                        if (tp->mdev_max < tp->rttvar)
                                tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
                        tp->rtt_seq = tp->snd_nxt;
                        tp->mdev_max = TCP_RTO_MIN;
                }
        } else {
                /* no previous measure.
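                 * Fixed-point note: tp->srtt holds 8*RTT and tp->mdev,
                 * tp->rttvar hold 4*mdev, so tcp_set_rto() below computes
                 * rto = (srtt >> 3) + rttvar = RTT + 4*mdev; the initial
                 * mdev = m<<1 therefore gives rto ~= 3*RTT.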
                 */
                tp->srtt = m<<3;        /* take the measured time to be rtt */
                tp->mdev = m<<1;        /* make sure rto = 3*rtt */
                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
                tp->rtt_seq = tp->snd_nxt;
        }
}

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        /* Old crap is replaced with new one. 8)
         *
         * More seriously:
         * 1. If rtt variance happened to be less 50msec, it is hallucination.
         *    It cannot be less due to utterly erratic ACK generation made
         *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
         *    to do with delayed acks, because at cwnd>2 true delack timeout
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
        inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;

        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
         *    all the algo is pure shit and should be replaced
         *    with correct one. It is exactly, which we pretend to do.
         */
}

/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 * guarantees that rto is higher.
 */
static inline void tcp_bound_rto(struct sock *sk)
{
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

/* Save metrics learned by this TCP session.
   This function is called only, when TCP finishes successfully
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
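   The learned RTT, RTT variance, ssthresh, cwnd and reordering values are
   written into the destination cache entry, so later connections to the
   same destination can start from them (see tcp_init_metrics()).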
 */
void tcp_update_metrics(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);

        if (sysctl_tcp_nometrics_save)
                return;

        dst_confirm(dst);

        if (dst && (dst->flags&DST_HOST)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                int m;

                if (icsk->icsk_backoff || !tp->srtt) {
                        /* This session failed to estimate rtt. Why?
                         * Probably, no packets returned in time.
                         * Reset our results.
                         */
                        if (!(dst_metric_locked(dst, RTAX_RTT)))
                                dst->metrics[RTAX_RTT-1] = 0;
                        return;
                }

                m = dst_metric(dst, RTAX_RTT) - tp->srtt;

                /* If newly calculated rtt larger than stored one,
                 * store new one. Otherwise, use EWMA. Remember,
                 * rtt overestimation is always better than underestimation.
                 */
                if (!(dst_metric_locked(dst, RTAX_RTT))) {
                        if (m <= 0)
                                dst->metrics[RTAX_RTT-1] = tp->srtt;
                        else
                                dst->metrics[RTAX_RTT-1] -= (m>>3);
                }

                if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
                        if (m < 0)
                                m = -m;

                        /* Scale deviation to rttvar fixed point */
                        m >>= 1;
                        if (m < tp->mdev)
                                m = tp->mdev;

                        if (m >= dst_metric(dst, RTAX_RTTVAR))
                                dst->metrics[RTAX_RTTVAR-1] = m;
                        else
                                dst->metrics[RTAX_RTTVAR-1] -=
                                        (dst->metrics[RTAX_RTTVAR-1] - m)>>2;
                }

                if (tp->snd_ssthresh >= 0xFFFF) {
                        /* Slow start still did not finish. */
                        if (dst_metric(dst, RTAX_SSTHRESH) &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
                                dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
                        if (!dst_metric_locked(dst, RTAX_CWND) &&
                            tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
                                dst->metrics[RTAX_CWND-1] = tp->snd_cwnd;
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                           icsk->icsk_ca_state == TCP_CA_Open) {
                        /* Cong. avoidance phase, cwnd is reliable. */
                        if (!dst_metric_locked(dst, RTAX_SSTHRESH))
                                dst->metrics[RTAX_SSTHRESH-1] =
                                        max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
                        if (!dst_metric_locked(dst, RTAX_CWND))
                                dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_cwnd) >> 1;
                } else {
                        /* Else slow start did not finish, cwnd is non-sense,
                           ssthresh may be also invalid.
                         */
                        if (!dst_metric_locked(dst, RTAX_CWND))
                                dst->metrics[RTAX_CWND-1] = (dst->metrics[RTAX_CWND-1] + tp->snd_ssthresh) >> 1;
                        if (dst->metrics[RTAX_SSTHRESH-1] &&
                            !dst_metric_locked(dst, RTAX_SSTHRESH) &&
                            tp->snd_ssthresh > dst->metrics[RTAX_SSTHRESH-1])
                                dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
                }

                if (!dst_metric_locked(dst, RTAX_REORDERING)) {
                        if (dst->metrics[RTAX_REORDERING-1] < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                dst->metrics[RTAX_REORDERING-1] = tp->reordering;
                }
        }
}

/* Numbers are taken from RFC2414.  */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

        if (!cwnd) {
                if (tp->mss_cache > 1460)
                        cwnd = 2;
                else
                        cwnd = (tp->mss_cache > 1095) ? 3 : 4;
        }
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->prior_ssthresh = 0;
        tp->bytes_acked = 0;
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
                tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
                tp->snd_cwnd = min(tp->snd_cwnd,
                                   tcp_packets_in_flight(tp) + 1U);
                tp->snd_cwnd_cnt = 0;
                tp->high_seq = tp->snd_nxt;
                tp->snd_cwnd_stamp = tcp_time_stamp;
                TCP_ECN_queue_cwr(tp);

                tcp_set_ca_state(sk, TCP_CA_CWR);
        }
}

/* Initialize metrics on socket.
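 * Pull the ssthresh, cwnd clamp, reordering and RTT/RTTVAR values that
 * tcp_update_metrics() cached in the destination entry for earlier
 * connections; fall back to the conservative TCP_TIMEOUT_INIT defaults
 * (the reset: label below) when nothing usable is cached.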
 */

static void tcp_init_metrics(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        if (dst_metric_locked(dst, RTAX_CWND))
                tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
        if (dst_metric(dst, RTAX_SSTHRESH)) {
                tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        }
        if (dst_metric(dst, RTAX_REORDERING) &&
            tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
                tp->rx_opt.sack_ok &= ~2;
                tp->reordering = dst_metric(dst, RTAX_REORDERING);
        }

        if (dst_metric(dst, RTAX_RTT) == 0)
                goto reset;

        if (!tp->srtt && dst_metric(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
                goto reset;

        /* Initial rtt is determined from SYN,SYN-ACK.
         * The segment is small and rtt may appear much
         * less than real one. Use per-dst memory
         * to make it more realistic.
         *
         * A bit of theory. RTT is time passed after "normal" sized packet
         * is sent until it is ACKed. In normal circumstances sending small
         * packets force peer to delay ACKs and calculation is correct too.
         * The algorithm is adaptive and, provided we follow specs, it
         * NEVER underestimate RTT. BUT! If peer tries to make some clever
         * tricks sort of "quick acks" for time long enough to decrease RTT
         * to low value, and then abruptly stops to do it and starts to delay
         * ACKs, wait for troubles.
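         *
         * This is also why the cached per-destination values below are only
         * allowed to raise srtt/mdev, never to lower them.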
         */
        if (dst_metric(dst, RTAX_RTT) > tp->srtt) {
                tp->srtt = dst_metric(dst, RTAX_RTT);
                tp->rtt_seq = tp->snd_nxt;
        }
        if (dst_metric(dst, RTAX_RTTVAR) > tp->mdev) {
                tp->mdev = dst_metric(dst, RTAX_RTTVAR);
                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
        }
        tcp_set_rto(sk);
        tcp_bound_rto(sk);
        if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
                goto reset;
        tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        return;

reset:
        /* Play conservative. If timestamps are not
         * supported, TCP will fail to recalculate correct
         * rtt, if initial rto is too small. FORGET ALL AND RESET!
         */
        if (!tp->rx_opt.saw_tstamp && tp->srtt) {
                tp->srtt = 0;
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        }
}

static void tcp_update_reordering(struct sock *sk, const int metric,
                                  const int ts)
{
        struct tcp_sock *tp = tcp_sk(sk);
        if (metric > tp->reordering) {
                tp->reordering = min(TCP_MAX_REORDERING, metric);

                /* This exciting event is worth to be remembered. 8) */
                if (ts)
                        NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
                else if (IsReno(tp))
                        NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
                else if (IsFack(tp))
                        NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
                else
                        NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
#if FASTRETRANS_DEBUG > 1
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
                       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
                       tp->reordering,
                       tp->fackets_out,
                       tp->sacked_out,
                       tp->undo_marker ? tp->undo_retrans : 0);
#endif
                /* Disable FACK yet. */
                tp->rx_opt.sack_ok &= ~2;
        }
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 *  Tag	 InFlight	Description
 *  0	 1		- orig segment is in flight.
 *  S	 0		- nothing flies, orig reached receiver.
 *  L	 0		- nothing flies, orig lost by net.
 *  R	 2		- both orig and retransmit are in flight.
 *  L|R	 1		- orig is lost, retransmit is in flight.
 *  S|R	 1		- orig reached receiver, retrans is still in flight.
 *  (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after never retransmitted
 *	   hole was sent out.
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note, that state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
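 *
 * The displacement measured here is fed to tcp_update_reordering(), which
 * raises tp->reordering and clears the FACK bit in rx_opt.sack_ok, since
 * FACK assumes near in-order delivery.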
 */
static int
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
        struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
        int reord = tp->packets_out;
        int prior_fackets;
        u32 lost_retrans = 0;
        int flag = 0;
        int dup_sack = 0;
        int i;

        if (!tp->sacked_out)
                tp->fackets_out = 0;
        prior_fackets = tp->fackets_out;

        /* SACK fastpath:
         * if the only SACK change is the increase of the end_seq of
         * the first block then only apply that SACK block
         * and use retrans queue hinting otherwise slowpath */
        flag = 1;
        for (i = 0; i< num_sacks; i++) {
                __u32 start_seq = ntohl(sp[i].start_seq);
                __u32 end_seq = ntohl(sp[i].end_seq);

                if (i == 0){
                        if (tp->recv_sack_cache[i].start_seq != start_seq)
                                flag = 0;
                } else {
                        if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
                            (tp->recv_sack_cache[i].end_seq != end_seq))
                                flag = 0;
                }
                tp->recv_sack_cache[i].start_seq = start_seq;
                tp->recv_sack_cache[i].end_seq = end_seq;

                /* Check for D-SACK. */
                if (i == 0) {
                        u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;

                        if (before(start_seq, ack)) {
                                dup_sack = 1;
                                tp->rx_opt.sack_ok |= 4;
                                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
                        } else if (num_sacks > 1 &&
                                   !after(end_seq, ntohl(sp[1].end_seq)) &&
                                   !before(start_seq, ntohl(sp[1].start_seq))) {
                                dup_sack = 1;
                                tp->rx_opt.sack_ok |= 4;
                                NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
                        }

                        /* D-SACK for already forgotten data...
                         * Do dumb counting.
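                         * (Each such D-SACK suggests one of our
                         * retransmissions was spurious - the receiver already
                         * had the data - so undo_retrans is decremented
                         * below; the undo logic elsewhere treats it reaching
                         * zero as permission to revert the cwnd reduction.)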
*/ 9891da177e4SLinus Torvalds if (dup_sack && 9901da177e4SLinus Torvalds !after(end_seq, prior_snd_una) && 9911da177e4SLinus Torvalds after(end_seq, tp->undo_marker)) 9921da177e4SLinus Torvalds tp->undo_retrans--; 9931da177e4SLinus Torvalds 9941da177e4SLinus Torvalds /* Eliminate too old ACKs, but take into 9951da177e4SLinus Torvalds * account more or less fresh ones, they can 9961da177e4SLinus Torvalds * contain valid SACK info. 9971da177e4SLinus Torvalds */ 9981da177e4SLinus Torvalds if (before(ack, prior_snd_una - tp->max_window)) 9991da177e4SLinus Torvalds return 0; 10001da177e4SLinus Torvalds } 10016a438bbeSStephen Hemminger } 10026a438bbeSStephen Hemminger 10036a438bbeSStephen Hemminger if (flag) 10046a438bbeSStephen Hemminger num_sacks = 1; 10056a438bbeSStephen Hemminger else { 10066a438bbeSStephen Hemminger int j; 10076a438bbeSStephen Hemminger tp->fastpath_skb_hint = NULL; 10086a438bbeSStephen Hemminger 10096a438bbeSStephen Hemminger /* order SACK blocks to allow in order walk of the retrans queue */ 10106a438bbeSStephen Hemminger for (i = num_sacks-1; i > 0; i--) { 10116a438bbeSStephen Hemminger for (j = 0; j < i; j++){ 10126a438bbeSStephen Hemminger if (after(ntohl(sp[j].start_seq), 10136a438bbeSStephen Hemminger ntohl(sp[j+1].start_seq))){ 10146a438bbeSStephen Hemminger sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq); 10156a438bbeSStephen Hemminger sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq); 10166a438bbeSStephen Hemminger sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq); 10176a438bbeSStephen Hemminger sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq); 10186a438bbeSStephen Hemminger } 10196a438bbeSStephen Hemminger 10206a438bbeSStephen Hemminger } 10216a438bbeSStephen Hemminger } 10226a438bbeSStephen Hemminger } 10236a438bbeSStephen Hemminger 10246a438bbeSStephen Hemminger /* clear flag as used for different purpose in following code */ 10256a438bbeSStephen Hemminger flag = 0; 10266a438bbeSStephen Hemminger 10276a438bbeSStephen Hemminger for (i=0; i<num_sacks; i++, sp++) { 10286a438bbeSStephen Hemminger struct sk_buff *skb; 10296a438bbeSStephen Hemminger __u32 start_seq = ntohl(sp->start_seq); 10306a438bbeSStephen Hemminger __u32 end_seq = ntohl(sp->end_seq); 10316a438bbeSStephen Hemminger int fack_count; 10326a438bbeSStephen Hemminger 10336a438bbeSStephen Hemminger /* Use SACK fastpath hint if valid */ 10346a438bbeSStephen Hemminger if (tp->fastpath_skb_hint) { 10356a438bbeSStephen Hemminger skb = tp->fastpath_skb_hint; 10366a438bbeSStephen Hemminger fack_count = tp->fastpath_cnt_hint; 10376a438bbeSStephen Hemminger } else { 10386a438bbeSStephen Hemminger skb = sk->sk_write_queue.next; 10396a438bbeSStephen Hemminger fack_count = 0; 10406a438bbeSStephen Hemminger } 10411da177e4SLinus Torvalds 10421da177e4SLinus Torvalds /* Event "B" in the comment above. */ 10431da177e4SLinus Torvalds if (after(end_seq, tp->high_seq)) 10441da177e4SLinus Torvalds flag |= FLAG_DATA_LOST; 10451da177e4SLinus Torvalds 10466a438bbeSStephen Hemminger sk_stream_for_retrans_queue_from(skb, sk) { 10476475be16SDavid S. Miller int in_sack, pcount; 10486475be16SDavid S. Miller u8 sacked; 10491da177e4SLinus Torvalds 10506a438bbeSStephen Hemminger tp->fastpath_skb_hint = skb; 10516a438bbeSStephen Hemminger tp->fastpath_cnt_hint = fack_count; 10526a438bbeSStephen Hemminger 10531da177e4SLinus Torvalds /* The retransmission queue is always in order, so 10541da177e4SLinus Torvalds * we can short-circuit the walk early. 
10551da177e4SLinus Torvalds */ 10561da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 10571da177e4SLinus Torvalds break; 10581da177e4SLinus Torvalds 10593c05d92eSHerbert Xu in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 10603c05d92eSHerbert Xu !before(end_seq, TCP_SKB_CB(skb)->end_seq); 10613c05d92eSHerbert Xu 10626475be16SDavid S. Miller pcount = tcp_skb_pcount(skb); 10636475be16SDavid S. Miller 10643c05d92eSHerbert Xu if (pcount > 1 && !in_sack && 10653c05d92eSHerbert Xu after(TCP_SKB_CB(skb)->end_seq, start_seq)) { 10666475be16SDavid S. Miller unsigned int pkt_len; 10676475be16SDavid S. Miller 10683c05d92eSHerbert Xu in_sack = !after(start_seq, 10693c05d92eSHerbert Xu TCP_SKB_CB(skb)->seq); 10703c05d92eSHerbert Xu 10713c05d92eSHerbert Xu if (!in_sack) 10726475be16SDavid S. Miller pkt_len = (start_seq - 10736475be16SDavid S. Miller TCP_SKB_CB(skb)->seq); 10746475be16SDavid S. Miller else 10756475be16SDavid S. Miller pkt_len = (end_seq - 10766475be16SDavid S. Miller TCP_SKB_CB(skb)->seq); 10777967168cSHerbert Xu if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size)) 10786475be16SDavid S. Miller break; 10796475be16SDavid S. Miller pcount = tcp_skb_pcount(skb); 10806475be16SDavid S. Miller } 10816475be16SDavid S. Miller 10826475be16SDavid S. Miller fack_count += pcount; 10831da177e4SLinus Torvalds 10846475be16SDavid S. Miller sacked = TCP_SKB_CB(skb)->sacked; 10856475be16SDavid S. Miller 10861da177e4SLinus Torvalds /* Account D-SACK for retransmitted packet. */ 10871da177e4SLinus Torvalds if ((dup_sack && in_sack) && 10881da177e4SLinus Torvalds (sacked & TCPCB_RETRANS) && 10891da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 10901da177e4SLinus Torvalds tp->undo_retrans--; 10911da177e4SLinus Torvalds 10921da177e4SLinus Torvalds /* The frame is ACKed. */ 10931da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) { 10941da177e4SLinus Torvalds if (sacked&TCPCB_RETRANS) { 10951da177e4SLinus Torvalds if ((dup_sack && in_sack) && 10961da177e4SLinus Torvalds (sacked&TCPCB_SACKED_ACKED)) 10971da177e4SLinus Torvalds reord = min(fack_count, reord); 10981da177e4SLinus Torvalds } else { 10991da177e4SLinus Torvalds /* If it was in a hole, we detected reordering. */ 11001da177e4SLinus Torvalds if (fack_count < prior_fackets && 11011da177e4SLinus Torvalds !(sacked&TCPCB_SACKED_ACKED)) 11021da177e4SLinus Torvalds reord = min(fack_count, reord); 11031da177e4SLinus Torvalds } 11041da177e4SLinus Torvalds 11051da177e4SLinus Torvalds /* Nothing to do; acked frame is about to be dropped. */ 11061da177e4SLinus Torvalds continue; 11071da177e4SLinus Torvalds } 11081da177e4SLinus Torvalds 11091da177e4SLinus Torvalds if ((sacked&TCPCB_SACKED_RETRANS) && 11101da177e4SLinus Torvalds after(end_seq, TCP_SKB_CB(skb)->ack_seq) && 11111da177e4SLinus Torvalds (!lost_retrans || after(end_seq, lost_retrans))) 11121da177e4SLinus Torvalds lost_retrans = end_seq; 11131da177e4SLinus Torvalds 11141da177e4SLinus Torvalds if (!in_sack) 11151da177e4SLinus Torvalds continue; 11161da177e4SLinus Torvalds 11171da177e4SLinus Torvalds if (!(sacked&TCPCB_SACKED_ACKED)) { 11181da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_RETRANS) { 11191da177e4SLinus Torvalds /* If the segment is not tagged as lost, 11201da177e4SLinus Torvalds * we do not clear RETRANS, believing 11211da177e4SLinus Torvalds * that retransmission is still in flight. 
11221da177e4SLinus Torvalds */ 11231da177e4SLinus Torvalds if (sacked & TCPCB_LOST) { 11241da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 11251da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(skb); 11261da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 11276a438bbeSStephen Hemminger 11286a438bbeSStephen Hemminger /* clear lost hint */ 11296a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 11301da177e4SLinus Torvalds } 11311da177e4SLinus Torvalds } else { 11321da177e4SLinus Torvalds /* New sack for not retransmitted frame, 11331da177e4SLinus Torvalds * which was in hole. It is reordering. 11341da177e4SLinus Torvalds */ 11351da177e4SLinus Torvalds if (!(sacked & TCPCB_RETRANS) && 11361da177e4SLinus Torvalds fack_count < prior_fackets) 11371da177e4SLinus Torvalds reord = min(fack_count, reord); 11381da177e4SLinus Torvalds 11391da177e4SLinus Torvalds if (sacked & TCPCB_LOST) { 11401da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 11411da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(skb); 11426a438bbeSStephen Hemminger 11436a438bbeSStephen Hemminger /* clear lost hint */ 11446a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 11451da177e4SLinus Torvalds } 11461da177e4SLinus Torvalds } 11471da177e4SLinus Torvalds 11481da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; 11491da177e4SLinus Torvalds flag |= FLAG_DATA_SACKED; 11501da177e4SLinus Torvalds tp->sacked_out += tcp_skb_pcount(skb); 11511da177e4SLinus Torvalds 11521da177e4SLinus Torvalds if (fack_count > tp->fackets_out) 11531da177e4SLinus Torvalds tp->fackets_out = fack_count; 11541da177e4SLinus Torvalds } else { 11551da177e4SLinus Torvalds if (dup_sack && (sacked&TCPCB_RETRANS)) 11561da177e4SLinus Torvalds reord = min(fack_count, reord); 11571da177e4SLinus Torvalds } 11581da177e4SLinus Torvalds 11591da177e4SLinus Torvalds /* D-SACK. We can detect redundant retransmission 11601da177e4SLinus Torvalds * in S|R and plain R frames and clear it. 11611da177e4SLinus Torvalds * undo_retrans is decreased above, L|R frames 11621da177e4SLinus Torvalds * are accounted above as well. 11631da177e4SLinus Torvalds */ 11641da177e4SLinus Torvalds if (dup_sack && 11651da177e4SLinus Torvalds (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) { 11661da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 11671da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 11686a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 11691da177e4SLinus Torvalds } 11701da177e4SLinus Torvalds } 11711da177e4SLinus Torvalds } 11721da177e4SLinus Torvalds 11731da177e4SLinus Torvalds /* Check for lost retransmit. This superb idea is 11741da177e4SLinus Torvalds * borrowed from "ratehalving". Event "C". 11751da177e4SLinus Torvalds * Later note: FACK people cheated me again 8), 11761da177e4SLinus Torvalds * we have to account for reordering! Ugly, 11771da177e4SLinus Torvalds * but should help. 
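 *
 * A minimal sketch of the rule applied below (added for clarity): each
 * retransmitted segment recorded, in ack_seq, the value of SND.NXT at the
 * time it was retransmitted. If incoming SACKs have meanwhile covered
 * data beyond that point (with FACK, by any amount; without FACK, by at
 * least reordering * mss_cache) while the segment is still tagged
 * SACKED_RETRANS, the retransmission itself is presumed lost:
 * SACKED_RETRANS is cleared and, unless the segment is already SACKed or
 * marked lost, it is tagged LOST again.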
11781da177e4SLinus Torvalds */ 11796687e988SArnaldo Carvalho de Melo if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) { 11801da177e4SLinus Torvalds struct sk_buff *skb; 11811da177e4SLinus Torvalds 11821da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 11831da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->seq, lost_retrans)) 11841da177e4SLinus Torvalds break; 11851da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 11861da177e4SLinus Torvalds continue; 11871da177e4SLinus Torvalds if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) && 11881da177e4SLinus Torvalds after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) && 11891da177e4SLinus Torvalds (IsFack(tp) || 11901da177e4SLinus Torvalds !before(lost_retrans, 11911da177e4SLinus Torvalds TCP_SKB_CB(skb)->ack_seq + tp->reordering * 1192c1b4a7e6SDavid S. Miller tp->mss_cache))) { 11931da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 11941da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 11951da177e4SLinus Torvalds 11966a438bbeSStephen Hemminger /* clear lost hint */ 11976a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 11986a438bbeSStephen Hemminger 11991da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) { 12001da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 12011da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 12021da177e4SLinus Torvalds flag |= FLAG_DATA_SACKED; 12031da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); 12041da177e4SLinus Torvalds } 12051da177e4SLinus Torvalds } 12061da177e4SLinus Torvalds } 12071da177e4SLinus Torvalds } 12081da177e4SLinus Torvalds 12091da177e4SLinus Torvalds tp->left_out = tp->sacked_out + tp->lost_out; 12101da177e4SLinus Torvalds 12116687e988SArnaldo Carvalho de Melo if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss) 12126687e988SArnaldo Carvalho de Melo tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0); 12131da177e4SLinus Torvalds 12141da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 12151da177e4SLinus Torvalds BUG_TRAP((int)tp->sacked_out >= 0); 12161da177e4SLinus Torvalds BUG_TRAP((int)tp->lost_out >= 0); 12171da177e4SLinus Torvalds BUG_TRAP((int)tp->retrans_out >= 0); 12181da177e4SLinus Torvalds BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0); 12191da177e4SLinus Torvalds #endif 12201da177e4SLinus Torvalds return flag; 12211da177e4SLinus Torvalds } 12221da177e4SLinus Torvalds 12231da177e4SLinus Torvalds /* RTO occurred, but do not yet enter loss state. Instead, transmit two new 12241da177e4SLinus Torvalds * segments to see from the next ACKs whether any data was really missing. 12251da177e4SLinus Torvalds * If the RTO was spurious, new ACKs should arrive. 
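 *
 * An illustrative timeline for the spurious-RTO case (added, hypothetical
 * sequence of events):
 *
 *	RTO fires        -> tcp_enter_frto(): frto_counter = 1, RETRANS tags
 *	                    are cleared and previously unsent data is
 *	                    transmitted instead of a go-back-N retransmit
 *	ACK advances     -> the "lost" segment was merely delayed; the RTO
 *	                    was spurious and the undo markers set here allow
 *	                    the reduction to be unwound
 *	dupack arrives   -> tcp_enter_frto_loss(): fall back to traditional
 *	                    Loss-state recovery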
12261da177e4SLinus Torvalds */ 12271da177e4SLinus Torvalds void tcp_enter_frto(struct sock *sk) 12281da177e4SLinus Torvalds { 12296687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 12301da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 12311da177e4SLinus Torvalds struct sk_buff *skb; 12321da177e4SLinus Torvalds 12331da177e4SLinus Torvalds tp->frto_counter = 1; 12341da177e4SLinus Torvalds 12356687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state <= TCP_CA_Disorder || 12361da177e4SLinus Torvalds tp->snd_una == tp->high_seq || 12376687e988SArnaldo Carvalho de Melo (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 12386687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 12396687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 12406687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_FRTO); 12411da177e4SLinus Torvalds } 12421da177e4SLinus Torvalds 12431da177e4SLinus Torvalds /* Have to clear retransmission markers here to keep the bookkeeping 12441da177e4SLinus Torvalds * in shape, even though we are not yet in Loss state. 12451da177e4SLinus Torvalds * If something was really lost, it is eventually caught up 12461da177e4SLinus Torvalds * in tcp_enter_frto_loss. 12471da177e4SLinus Torvalds */ 12481da177e4SLinus Torvalds tp->retrans_out = 0; 12491da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 12501da177e4SLinus Torvalds tp->undo_retrans = 0; 12511da177e4SLinus Torvalds 12521da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 12531da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS; 12541da177e4SLinus Torvalds } 12551da177e4SLinus Torvalds tcp_sync_left_out(tp); 12561da177e4SLinus Torvalds 12576687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 12581da177e4SLinus Torvalds tp->frto_highmark = tp->snd_nxt; 12591da177e4SLinus Torvalds } 12601da177e4SLinus Torvalds 12611da177e4SLinus Torvalds /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, 12621da177e4SLinus Torvalds * which indicates that we should follow the traditional RTO recovery, 12631da177e4SLinus Torvalds * i.e. mark everything lost and do go-back-N retransmission. 
12641da177e4SLinus Torvalds */ 12651da177e4SLinus Torvalds static void tcp_enter_frto_loss(struct sock *sk) 12661da177e4SLinus Torvalds { 12671da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 12681da177e4SLinus Torvalds struct sk_buff *skb; 12691da177e4SLinus Torvalds int cnt = 0; 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds tp->sacked_out = 0; 12721da177e4SLinus Torvalds tp->lost_out = 0; 12731da177e4SLinus Torvalds tp->fackets_out = 0; 12741da177e4SLinus Torvalds 12751da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 12761da177e4SLinus Torvalds cnt += tcp_skb_pcount(skb); 12771da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 12781da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { 12791da177e4SLinus Torvalds 12801da177e4SLinus Torvalds /* Do not mark those segments lost that were 12811da177e4SLinus Torvalds * forward transmitted after RTO 12821da177e4SLinus Torvalds */ 12831da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, 12841da177e4SLinus Torvalds tp->frto_highmark)) { 12851da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 12861da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 12871da177e4SLinus Torvalds } 12881da177e4SLinus Torvalds } else { 12891da177e4SLinus Torvalds tp->sacked_out += tcp_skb_pcount(skb); 12901da177e4SLinus Torvalds tp->fackets_out = cnt; 12911da177e4SLinus Torvalds } 12921da177e4SLinus Torvalds } 12931da177e4SLinus Torvalds tcp_sync_left_out(tp); 12941da177e4SLinus Torvalds 12951da177e4SLinus Torvalds tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1; 12961da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 12971da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 12981da177e4SLinus Torvalds tp->undo_marker = 0; 12991da177e4SLinus Torvalds tp->frto_counter = 0; 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds tp->reordering = min_t(unsigned int, tp->reordering, 13021da177e4SLinus Torvalds sysctl_tcp_reordering); 13036687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 13041da177e4SLinus Torvalds tp->high_seq = tp->frto_highmark; 13051da177e4SLinus Torvalds TCP_ECN_queue_cwr(tp); 13066a438bbeSStephen Hemminger 13076a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 13081da177e4SLinus Torvalds } 13091da177e4SLinus Torvalds 13101da177e4SLinus Torvalds void tcp_clear_retrans(struct tcp_sock *tp) 13111da177e4SLinus Torvalds { 13121da177e4SLinus Torvalds tp->left_out = 0; 13131da177e4SLinus Torvalds tp->retrans_out = 0; 13141da177e4SLinus Torvalds 13151da177e4SLinus Torvalds tp->fackets_out = 0; 13161da177e4SLinus Torvalds tp->sacked_out = 0; 13171da177e4SLinus Torvalds tp->lost_out = 0; 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds tp->undo_marker = 0; 13201da177e4SLinus Torvalds tp->undo_retrans = 0; 13211da177e4SLinus Torvalds } 13221da177e4SLinus Torvalds 13231da177e4SLinus Torvalds /* Enter Loss state. If "how" is not zero, forget all SACK information 13241da177e4SLinus Torvalds * and reset tags completely, otherwise preserve SACKs. If receiver 13251da177e4SLinus Torvalds * dropped its ofo queue, we will know this due to reneging detection. 
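 *
 * A short summary of the two cases, added for clarity: how == 0 is the
 * plain RTO case -- SACK tags on already SACKed segments survive and
 * undo_marker is set (provided nothing had been retransmitted) so the
 * cwnd reduction can later be undone if the timeout proves spurious.
 * how != 0 is the SACK reneging case -- the SACKED_ACKED bits are
 * discarded as well, so every outstanding segment is re-tagged LOST and
 * will be retransmitted.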
13261da177e4SLinus Torvalds */ 13271da177e4SLinus Torvalds void tcp_enter_loss(struct sock *sk, int how) 13281da177e4SLinus Torvalds { 13296687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 13301da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13311da177e4SLinus Torvalds struct sk_buff *skb; 13321da177e4SLinus Torvalds int cnt = 0; 13331da177e4SLinus Torvalds 13341da177e4SLinus Torvalds /* Reduce ssthresh if it has not yet been made inside this window. */ 13356687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 13366687e988SArnaldo Carvalho de Melo (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 13376687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 13386687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 13396687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_LOSS); 13401da177e4SLinus Torvalds } 13411da177e4SLinus Torvalds tp->snd_cwnd = 1; 13421da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 13431da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 13441da177e4SLinus Torvalds 13459772efb9SStephen Hemminger tp->bytes_acked = 0; 13461da177e4SLinus Torvalds tcp_clear_retrans(tp); 13471da177e4SLinus Torvalds 13481da177e4SLinus Torvalds /* Push undo marker, if it was plain RTO and nothing 13491da177e4SLinus Torvalds * was retransmitted. */ 13501da177e4SLinus Torvalds if (!how) 13511da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 13521da177e4SLinus Torvalds 13531da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 13541da177e4SLinus Torvalds cnt += tcp_skb_pcount(skb); 13551da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS) 13561da177e4SLinus Torvalds tp->undo_marker = 0; 13571da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 13581da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 13591da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 13601da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 13611da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 13621da177e4SLinus Torvalds } else { 13631da177e4SLinus Torvalds tp->sacked_out += tcp_skb_pcount(skb); 13641da177e4SLinus Torvalds tp->fackets_out = cnt; 13651da177e4SLinus Torvalds } 13661da177e4SLinus Torvalds } 13671da177e4SLinus Torvalds tcp_sync_left_out(tp); 13681da177e4SLinus Torvalds 13691da177e4SLinus Torvalds tp->reordering = min_t(unsigned int, tp->reordering, 13701da177e4SLinus Torvalds sysctl_tcp_reordering); 13716687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Loss); 13721da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 13731da177e4SLinus Torvalds TCP_ECN_queue_cwr(tp); 13746a438bbeSStephen Hemminger 13756a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 13761da177e4SLinus Torvalds } 13771da177e4SLinus Torvalds 1378463c84b9SArnaldo Carvalho de Melo static int tcp_check_sack_reneging(struct sock *sk) 13791da177e4SLinus Torvalds { 13801da177e4SLinus Torvalds struct sk_buff *skb; 13811da177e4SLinus Torvalds 13821da177e4SLinus Torvalds /* If ACK arrived pointing to a remembered SACK, 13831da177e4SLinus Torvalds * it means that our remembered SACKs do not reflect 13841da177e4SLinus Torvalds * real state of receiver i.e. 13851da177e4SLinus Torvalds * receiver _host_ is heavily congested (or buggy). 13861da177e4SLinus Torvalds * Do processing similar to RTO timeout. 
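 *
 * Added note on the check below: "reneging" means the very first
 * un-ACKed segment in the write queue is still tagged SACKED_ACKED --
 * the receiver threw away data it had previously SACKed, typically
 * because its out-of-order queue was dropped under memory pressure.
 * The response mirrors an RTO: tcp_enter_loss(sk, 1) forgets all SACK
 * information, the head of the queue is retransmitted at once and the
 * retransmit timer is re-armed.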
13871da177e4SLinus Torvalds */ 13881da177e4SLinus Torvalds if ((skb = skb_peek(&sk->sk_write_queue)) != NULL && 13891da177e4SLinus Torvalds (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 13906687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 13911da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); 13921da177e4SLinus Torvalds 13931da177e4SLinus Torvalds tcp_enter_loss(sk, 1); 13946687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits++; 13951da177e4SLinus Torvalds tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); 1396463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 13976687e988SArnaldo Carvalho de Melo icsk->icsk_rto, TCP_RTO_MAX); 13981da177e4SLinus Torvalds return 1; 13991da177e4SLinus Torvalds } 14001da177e4SLinus Torvalds return 0; 14011da177e4SLinus Torvalds } 14021da177e4SLinus Torvalds 14031da177e4SLinus Torvalds static inline int tcp_fackets_out(struct tcp_sock *tp) 14041da177e4SLinus Torvalds { 14051da177e4SLinus Torvalds return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds 1408463c84b9SArnaldo Carvalho de Melo static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 14091da177e4SLinus Torvalds { 1410463c84b9SArnaldo Carvalho de Melo return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 14111da177e4SLinus Torvalds } 14121da177e4SLinus Torvalds 14131da177e4SLinus Torvalds static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) 14141da177e4SLinus Torvalds { 14151da177e4SLinus Torvalds return tp->packets_out && 1416463c84b9SArnaldo Carvalho de Melo tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue)); 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds /* Linux NewReno/SACK/FACK/ECN state machine. 14201da177e4SLinus Torvalds * -------------------------------------- 14211da177e4SLinus Torvalds * 14221da177e4SLinus Torvalds * "Open" Normal state, no dubious events, fast path. 14231da177e4SLinus Torvalds * "Disorder" In all the respects it is "Open", 14241da177e4SLinus Torvalds * but requires a bit more attention. It is entered when 14251da177e4SLinus Torvalds * we see some SACKs or dupacks. It is split of "Open" 14261da177e4SLinus Torvalds * mainly to move some processing from fast path to slow one. 14271da177e4SLinus Torvalds * "CWR" CWND was reduced due to some Congestion Notification event. 14281da177e4SLinus Torvalds * It can be ECN, ICMP source quench, local device congestion. 14291da177e4SLinus Torvalds * "Recovery" CWND was reduced, we are fast-retransmitting. 14301da177e4SLinus Torvalds * "Loss" CWND was reduced due to RTO timeout or SACK reneging. 14311da177e4SLinus Torvalds * 14321da177e4SLinus Torvalds * tcp_fastretrans_alert() is entered: 14331da177e4SLinus Torvalds * - each incoming ACK, if state is not "Open" 14341da177e4SLinus Torvalds * - when arrived ACK is unusual, namely: 14351da177e4SLinus Torvalds * * SACK 14361da177e4SLinus Torvalds * * Duplicate ACK. 14371da177e4SLinus Torvalds * * ECN ECE. 14381da177e4SLinus Torvalds * 14391da177e4SLinus Torvalds * Counting packets in flight is pretty simple. 14401da177e4SLinus Torvalds * 14411da177e4SLinus Torvalds * in_flight = packets_out - left_out + retrans_out 14421da177e4SLinus Torvalds * 14431da177e4SLinus Torvalds * packets_out is SND.NXT-SND.UNA counted in packets. 14441da177e4SLinus Torvalds * 14451da177e4SLinus Torvalds * retrans_out is number of retransmitted segments. 
14461da177e4SLinus Torvalds * 14471da177e4SLinus Torvalds * left_out is the number of segments that left the network, but are not ACKed yet. 14481da177e4SLinus Torvalds * 14491da177e4SLinus Torvalds * left_out = sacked_out + lost_out 14501da177e4SLinus Torvalds * 14511da177e4SLinus Torvalds * sacked_out: Packets which arrived at the receiver out of order 14521da177e4SLinus Torvalds * and hence are not ACKed. With SACKs this number is simply the 14531da177e4SLinus Torvalds * amount of SACKed data. Even without SACKs 14541da177e4SLinus Torvalds * it is easy to give a pretty reliable estimate of this number 14551da177e4SLinus Torvalds * by counting duplicate ACKs. 14561da177e4SLinus Torvalds * 14571da177e4SLinus Torvalds * lost_out: Packets lost by the network. TCP has no explicit 14581da177e4SLinus Torvalds * "loss notification" feedback from the network (for now). 14591da177e4SLinus Torvalds * It means that this number can only be _guessed_. 14601da177e4SLinus Torvalds * Actually, it is the heuristic used to predict lossage that 14611da177e4SLinus Torvalds * distinguishes the different algorithms. 14621da177e4SLinus Torvalds * 14631da177e4SLinus Torvalds * F.e. after RTO, when the whole queue is considered lost, 14641da177e4SLinus Torvalds * lost_out = packets_out and in_flight = retrans_out. 14651da177e4SLinus Torvalds * 14661da177e4SLinus Torvalds * Essentially, we now have two algorithms for counting 14671da177e4SLinus Torvalds * lost packets. 14681da177e4SLinus Torvalds * 14691da177e4SLinus Torvalds * FACK: It is the simplest heuristic. As soon as we decide 14701da177e4SLinus Torvalds * that something is lost, we decide that _all_ not SACKed 14711da177e4SLinus Torvalds * packets until the most forward SACK are lost. I.e. 14721da177e4SLinus Torvalds * lost_out = fackets_out - sacked_out and left_out = fackets_out. 14731da177e4SLinus Torvalds * It is an absolutely correct estimate, if the network does not reorder 14741da177e4SLinus Torvalds * packets. And it loses any connection to reality when reordering 14751da177e4SLinus Torvalds * takes place. We use FACK by default until reordering 14761da177e4SLinus Torvalds * is suspected on the path to this destination. 14771da177e4SLinus Torvalds * 14781da177e4SLinus Torvalds * NewReno: when Recovery is entered, we assume that one segment 14791da177e4SLinus Torvalds * is lost (classic Reno). While we are in Recovery and 14801da177e4SLinus Torvalds * a partial ACK arrives, we assume that one more packet 14811da177e4SLinus Torvalds * is lost (NewReno). These heuristics are the same in NewReno 14821da177e4SLinus Torvalds * and SACK. 14831da177e4SLinus Torvalds * 14841da177e4SLinus Torvalds * Imagine, that's all! Forget about all this shamanism about CWND inflation, 14851da177e4SLinus Torvalds * deflation etc. CWND is the real congestion window, never inflated; it changes 14861da177e4SLinus Torvalds * only according to classic VJ rules. 14871da177e4SLinus Torvalds * 14881da177e4SLinus Torvalds * The really tricky (and requiring careful tuning) part of the algorithm 14891da177e4SLinus Torvalds * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). 14901da177e4SLinus Torvalds * The first determines the moment _when_ we should reduce CWND and, 14911da177e4SLinus Torvalds * hence, slow down forward transmission. In fact, it determines the moment 14921da177e4SLinus Torvalds * when we decide that a hole is caused by loss, rather than by a reorder.
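 *
 * To make the accounting above concrete, a worked example with
 * hypothetical numbers (added for illustration): with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1,
 *
 *	left_out  = sacked_out + lost_out                = 5
 *	in_flight = packets_out - left_out + retrans_out = 6
 *
 * Under FACK, if the most forward SACK implies fackets_out = 7, every
 * not yet SACKed segment below that point is written off at once:
 *
 *	lost_out = fackets_out - sacked_out = 4, left_out = fackets_out = 7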
14931da177e4SLinus Torvalds * 14941da177e4SLinus Torvalds * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill 14951da177e4SLinus Torvalds * holes, caused by lost packets. 14961da177e4SLinus Torvalds * 14971da177e4SLinus Torvalds * And the most logically complicated part of algorithm is undo 14981da177e4SLinus Torvalds * heuristics. We detect false retransmits due to both too early 14991da177e4SLinus Torvalds * fast retransmit (reordering) and underestimated RTO, analyzing 15001da177e4SLinus Torvalds * timestamps and D-SACKs. When we detect that some segments were 15011da177e4SLinus Torvalds * retransmitted by mistake and CWND reduction was wrong, we undo 15021da177e4SLinus Torvalds * window reduction and abort recovery phase. This logic is hidden 15031da177e4SLinus Torvalds * inside several functions named tcp_try_undo_<something>. 15041da177e4SLinus Torvalds */ 15051da177e4SLinus Torvalds 15061da177e4SLinus Torvalds /* This function decides, when we should leave Disordered state 15071da177e4SLinus Torvalds * and enter Recovery phase, reducing congestion window. 15081da177e4SLinus Torvalds * 15091da177e4SLinus Torvalds * Main question: may we further continue forward transmission 15101da177e4SLinus Torvalds * with the same cwnd? 15111da177e4SLinus Torvalds */ 15121da177e4SLinus Torvalds static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) 15131da177e4SLinus Torvalds { 15141da177e4SLinus Torvalds __u32 packets_out; 15151da177e4SLinus Torvalds 15161da177e4SLinus Torvalds /* Trick#1: The loss is proven. */ 15171da177e4SLinus Torvalds if (tp->lost_out) 15181da177e4SLinus Torvalds return 1; 15191da177e4SLinus Torvalds 15201da177e4SLinus Torvalds /* Not-A-Trick#2 : Classic rule... */ 15211da177e4SLinus Torvalds if (tcp_fackets_out(tp) > tp->reordering) 15221da177e4SLinus Torvalds return 1; 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds /* Trick#3 : when we use RFC2988 timer restart, fast 15251da177e4SLinus Torvalds * retransmit can be triggered by timeout of queue head. 15261da177e4SLinus Torvalds */ 15271da177e4SLinus Torvalds if (tcp_head_timedout(sk, tp)) 15281da177e4SLinus Torvalds return 1; 15291da177e4SLinus Torvalds 15301da177e4SLinus Torvalds /* Trick#4: It is still not OK... But will it be useful to delay 15311da177e4SLinus Torvalds * recovery more? 15321da177e4SLinus Torvalds */ 15331da177e4SLinus Torvalds packets_out = tp->packets_out; 15341da177e4SLinus Torvalds if (packets_out <= tp->reordering && 15351da177e4SLinus Torvalds tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 15361da177e4SLinus Torvalds !tcp_may_send_now(sk, tp)) { 15371da177e4SLinus Torvalds /* We have nothing to send. This connection is limited 15381da177e4SLinus Torvalds * either by receiver window or by application. 15391da177e4SLinus Torvalds */ 15401da177e4SLinus Torvalds return 1; 15411da177e4SLinus Torvalds } 15421da177e4SLinus Torvalds 15431da177e4SLinus Torvalds return 0; 15441da177e4SLinus Torvalds } 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds /* If we receive more dupacks than we expected counting segments 15471da177e4SLinus Torvalds * in assumption of absent reordering, interpret this as reordering. 15481da177e4SLinus Torvalds * The only another reason could be bug in receiver TCP. 
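 *
 * A small example with hypothetical numbers (added): Reno keeps
 * sacked_out as a plain dupack counter and lost_out as its guess of the
 * holes. If, say, packets_out = 8, lost_out = 2 and 7 dupacks have been
 * counted, then sacked_out + holes = 9 > packets_out, which is
 * impossible without reordering; the code below clamps sacked_out back
 * to packets_out - holes = 6 and raises the reordering metric.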
15491da177e4SLinus Torvalds */ 15506687e988SArnaldo Carvalho de Melo static void tcp_check_reno_reordering(struct sock *sk, const int addend) 15511da177e4SLinus Torvalds { 15526687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 15531da177e4SLinus Torvalds u32 holes; 15541da177e4SLinus Torvalds 15551da177e4SLinus Torvalds holes = max(tp->lost_out, 1U); 15561da177e4SLinus Torvalds holes = min(holes, tp->packets_out); 15571da177e4SLinus Torvalds 15581da177e4SLinus Torvalds if ((tp->sacked_out + holes) > tp->packets_out) { 15591da177e4SLinus Torvalds tp->sacked_out = tp->packets_out - holes; 15606687e988SArnaldo Carvalho de Melo tcp_update_reordering(sk, tp->packets_out + addend, 0); 15611da177e4SLinus Torvalds } 15621da177e4SLinus Torvalds } 15631da177e4SLinus Torvalds 15641da177e4SLinus Torvalds /* Emulate SACKs for SACKless connection: account for a new dupack. */ 15651da177e4SLinus Torvalds 15666687e988SArnaldo Carvalho de Melo static void tcp_add_reno_sack(struct sock *sk) 15671da177e4SLinus Torvalds { 15686687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 15691da177e4SLinus Torvalds tp->sacked_out++; 15706687e988SArnaldo Carvalho de Melo tcp_check_reno_reordering(sk, 0); 15711da177e4SLinus Torvalds tcp_sync_left_out(tp); 15721da177e4SLinus Torvalds } 15731da177e4SLinus Torvalds 15741da177e4SLinus Torvalds /* Account for ACK, ACKing some data in Reno Recovery phase. */ 15751da177e4SLinus Torvalds 15761da177e4SLinus Torvalds static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked) 15771da177e4SLinus Torvalds { 15781da177e4SLinus Torvalds if (acked > 0) { 15791da177e4SLinus Torvalds /* One ACK acked hole. The rest eat duplicate ACKs. */ 15801da177e4SLinus Torvalds if (acked-1 >= tp->sacked_out) 15811da177e4SLinus Torvalds tp->sacked_out = 0; 15821da177e4SLinus Torvalds else 15831da177e4SLinus Torvalds tp->sacked_out -= acked-1; 15841da177e4SLinus Torvalds } 15856687e988SArnaldo Carvalho de Melo tcp_check_reno_reordering(sk, acked); 15861da177e4SLinus Torvalds tcp_sync_left_out(tp); 15871da177e4SLinus Torvalds } 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 15901da177e4SLinus Torvalds { 15911da177e4SLinus Torvalds tp->sacked_out = 0; 15921da177e4SLinus Torvalds tp->left_out = tp->lost_out; 15931da177e4SLinus Torvalds } 15941da177e4SLinus Torvalds 15951da177e4SLinus Torvalds /* Mark head of queue up as lost. */ 15961da177e4SLinus Torvalds static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, 15971da177e4SLinus Torvalds int packets, u32 high_seq) 15981da177e4SLinus Torvalds { 15991da177e4SLinus Torvalds struct sk_buff *skb; 16006a438bbeSStephen Hemminger int cnt; 16011da177e4SLinus Torvalds 16026a438bbeSStephen Hemminger BUG_TRAP(packets <= tp->packets_out); 16036a438bbeSStephen Hemminger if (tp->lost_skb_hint) { 16046a438bbeSStephen Hemminger skb = tp->lost_skb_hint; 16056a438bbeSStephen Hemminger cnt = tp->lost_cnt_hint; 16066a438bbeSStephen Hemminger } else { 16076a438bbeSStephen Hemminger skb = sk->sk_write_queue.next; 16086a438bbeSStephen Hemminger cnt = 0; 16096a438bbeSStephen Hemminger } 16101da177e4SLinus Torvalds 16116a438bbeSStephen Hemminger sk_stream_for_retrans_queue_from(skb, sk) { 16126a438bbeSStephen Hemminger /* TODO: do this better */ 16136a438bbeSStephen Hemminger /* this is not the most efficient way to do this... 
*/ 16146a438bbeSStephen Hemminger tp->lost_skb_hint = skb; 16156a438bbeSStephen Hemminger tp->lost_cnt_hint = cnt; 16166a438bbeSStephen Hemminger cnt += tcp_skb_pcount(skb); 16176a438bbeSStephen Hemminger if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq)) 16181da177e4SLinus Torvalds break; 16191da177e4SLinus Torvalds if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 16201da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 16211da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 16226a438bbeSStephen Hemminger 16236a438bbeSStephen Hemminger /* clear xmit_retransmit_queue hints 16246a438bbeSStephen Hemminger * if this is beyond hint */ 16256a438bbeSStephen Hemminger if(tp->retransmit_skb_hint != NULL && 16266a438bbeSStephen Hemminger before(TCP_SKB_CB(skb)->seq, 16276a438bbeSStephen Hemminger TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) { 16286a438bbeSStephen Hemminger 16296a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 16306a438bbeSStephen Hemminger } 16311da177e4SLinus Torvalds } 16321da177e4SLinus Torvalds } 16331da177e4SLinus Torvalds tcp_sync_left_out(tp); 16341da177e4SLinus Torvalds } 16351da177e4SLinus Torvalds 16361da177e4SLinus Torvalds /* Account newly detected lost packet(s) */ 16371da177e4SLinus Torvalds 16381da177e4SLinus Torvalds static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) 16391da177e4SLinus Torvalds { 16401da177e4SLinus Torvalds if (IsFack(tp)) { 16411da177e4SLinus Torvalds int lost = tp->fackets_out - tp->reordering; 16421da177e4SLinus Torvalds if (lost <= 0) 16431da177e4SLinus Torvalds lost = 1; 16441da177e4SLinus Torvalds tcp_mark_head_lost(sk, tp, lost, tp->high_seq); 16451da177e4SLinus Torvalds } else { 16461da177e4SLinus Torvalds tcp_mark_head_lost(sk, tp, 1, tp->high_seq); 16471da177e4SLinus Torvalds } 16481da177e4SLinus Torvalds 16491da177e4SLinus Torvalds /* New heuristics: it is possible only after we switched 16501da177e4SLinus Torvalds * to restart timer each time when something is ACKed. 16511da177e4SLinus Torvalds * Hence, we can detect timed out packets during fast 16521da177e4SLinus Torvalds * retransmit without falling to slow start. 16531da177e4SLinus Torvalds */ 165479320d7eSAki M Nyrhinen if (!IsReno(tp) && tcp_head_timedout(sk, tp)) { 16551da177e4SLinus Torvalds struct sk_buff *skb; 16561da177e4SLinus Torvalds 16576a438bbeSStephen Hemminger skb = tp->scoreboard_skb_hint ? 
tp->scoreboard_skb_hint 16586a438bbeSStephen Hemminger : sk->sk_write_queue.next; 16596a438bbeSStephen Hemminger 16606a438bbeSStephen Hemminger sk_stream_for_retrans_queue_from(skb, sk) { 16616a438bbeSStephen Hemminger if (!tcp_skb_timedout(sk, skb)) 16626a438bbeSStephen Hemminger break; 16636a438bbeSStephen Hemminger 16646a438bbeSStephen Hemminger if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 16651da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 16661da177e4SLinus Torvalds tp->lost_out += tcp_skb_pcount(skb); 16676a438bbeSStephen Hemminger 16686a438bbeSStephen Hemminger /* clear xmit_retrans hint */ 16696a438bbeSStephen Hemminger if (tp->retransmit_skb_hint && 16706a438bbeSStephen Hemminger before(TCP_SKB_CB(skb)->seq, 16716a438bbeSStephen Hemminger TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 16726a438bbeSStephen Hemminger 16736a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 16741da177e4SLinus Torvalds } 16751da177e4SLinus Torvalds } 16766a438bbeSStephen Hemminger 16776a438bbeSStephen Hemminger tp->scoreboard_skb_hint = skb; 16786a438bbeSStephen Hemminger 16791da177e4SLinus Torvalds tcp_sync_left_out(tp); 16801da177e4SLinus Torvalds } 16811da177e4SLinus Torvalds } 16821da177e4SLinus Torvalds 16831da177e4SLinus Torvalds /* CWND moderation, preventing bursts due to too big ACKs 16841da177e4SLinus Torvalds * in dubious situations. 16851da177e4SLinus Torvalds */ 16861da177e4SLinus Torvalds static inline void tcp_moderate_cwnd(struct tcp_sock *tp) 16871da177e4SLinus Torvalds { 16881da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, 16891da177e4SLinus Torvalds tcp_packets_in_flight(tp)+tcp_max_burst(tp)); 16901da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 16911da177e4SLinus Torvalds } 16921da177e4SLinus Torvalds 169372dc5b92SStephen Hemminger /* The lower bound on the congestion window is the slow start threshold 169472dc5b92SStephen Hemminger * unless the congestion avoidance choice decides to override it. 169572dc5b92SStephen Hemminger */ 169672dc5b92SStephen Hemminger static inline u32 tcp_cwnd_min(const struct sock *sk) 169772dc5b92SStephen Hemminger { 169872dc5b92SStephen Hemminger const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 169972dc5b92SStephen Hemminger 170072dc5b92SStephen Hemminger return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; 170172dc5b92SStephen Hemminger } 170272dc5b92SStephen Hemminger 17031da177e4SLinus Torvalds /* Decrease cwnd every second ACK. */ 17046687e988SArnaldo Carvalho de Melo static void tcp_cwnd_down(struct sock *sk) 17051da177e4SLinus Torvalds { 17066687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 17071da177e4SLinus Torvalds int decr = tp->snd_cwnd_cnt + 1; 17081da177e4SLinus Torvalds 17091da177e4SLinus Torvalds tp->snd_cwnd_cnt = decr&1; 17101da177e4SLinus Torvalds decr >>= 1; 17111da177e4SLinus Torvalds 171272dc5b92SStephen Hemminger if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) 17131da177e4SLinus Torvalds tp->snd_cwnd -= decr; 17141da177e4SLinus Torvalds 17151da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); 17161da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 17171da177e4SLinus Torvalds } 17181da177e4SLinus Torvalds 17191da177e4SLinus Torvalds /* Nothing was retransmitted, or the returned timestamp is less 17201da177e4SLinus Torvalds * than the timestamp of the first retransmission.
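 *
 * Added note: rcv_tsecr echoes the timestamp carried by the segment the
 * receiver is acknowledging. If that echoed value predates retrans_stamp
 * (the timestamp of the first retransmission), the ACK must have been
 * triggered by the original transmission, so the
 *
 *	(__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0
 *
 * test below proves the retransmit was unnecessary and undoing the cwnd
 * reduction is safe.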
17211da177e4SLinus Torvalds */ 17221da177e4SLinus Torvalds static inline int tcp_packet_delayed(struct tcp_sock *tp) 17231da177e4SLinus Torvalds { 17241da177e4SLinus Torvalds return !tp->retrans_stamp || 17251da177e4SLinus Torvalds (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 17261da177e4SLinus Torvalds (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0); 17271da177e4SLinus Torvalds } 17281da177e4SLinus Torvalds 17291da177e4SLinus Torvalds /* Undo procedures. */ 17301da177e4SLinus Torvalds 17311da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 1 17321da177e4SLinus Torvalds static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) 17331da177e4SLinus Torvalds { 17341da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 17351da177e4SLinus Torvalds printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", 17361da177e4SLinus Torvalds msg, 17371da177e4SLinus Torvalds NIPQUAD(inet->daddr), ntohs(inet->dport), 17381da177e4SLinus Torvalds tp->snd_cwnd, tp->left_out, 17391da177e4SLinus Torvalds tp->snd_ssthresh, tp->prior_ssthresh, 17401da177e4SLinus Torvalds tp->packets_out); 17411da177e4SLinus Torvalds } 17421da177e4SLinus Torvalds #else 17431da177e4SLinus Torvalds #define DBGUNDO(x...) do { } while (0) 17441da177e4SLinus Torvalds #endif 17451da177e4SLinus Torvalds 17466687e988SArnaldo Carvalho de Melo static void tcp_undo_cwr(struct sock *sk, const int undo) 17471da177e4SLinus Torvalds { 17486687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 17496687e988SArnaldo Carvalho de Melo 17501da177e4SLinus Torvalds if (tp->prior_ssthresh) { 17516687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 17526687e988SArnaldo Carvalho de Melo 17536687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_ops->undo_cwnd) 17546687e988SArnaldo Carvalho de Melo tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 17551da177e4SLinus Torvalds else 17561da177e4SLinus Torvalds tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); 17571da177e4SLinus Torvalds 17581da177e4SLinus Torvalds if (undo && tp->prior_ssthresh > tp->snd_ssthresh) { 17591da177e4SLinus Torvalds tp->snd_ssthresh = tp->prior_ssthresh; 17601da177e4SLinus Torvalds TCP_ECN_withdraw_cwr(tp); 17611da177e4SLinus Torvalds } 17621da177e4SLinus Torvalds } else { 17631da177e4SLinus Torvalds tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 17641da177e4SLinus Torvalds } 17651da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 17661da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 17676a438bbeSStephen Hemminger 17686a438bbeSStephen Hemminger /* There is something screwy going on with the retrans hints after 17696a438bbeSStephen Hemminger an undo */ 17706a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 17711da177e4SLinus Torvalds } 17721da177e4SLinus Torvalds 17731da177e4SLinus Torvalds static inline int tcp_may_undo(struct tcp_sock *tp) 17741da177e4SLinus Torvalds { 17751da177e4SLinus Torvalds return tp->undo_marker && 17761da177e4SLinus Torvalds (!tp->undo_retrans || tcp_packet_delayed(tp)); 17771da177e4SLinus Torvalds } 17781da177e4SLinus Torvalds 17791da177e4SLinus Torvalds /* People celebrate: "We love our President!" */ 17801da177e4SLinus Torvalds static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) 17811da177e4SLinus Torvalds { 17821da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 17831da177e4SLinus Torvalds /* Happy end! We did not retransmit anything 17841da177e4SLinus Torvalds * or our original transmission succeeded. 
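 *
 * A sketch of when this fires, added for clarity: tcp_may_undo() above
 * requires that an undo_marker was set when the window was reduced, and
 * that either every retransmission has since been shown unnecessary by
 * D-SACKs (undo_retrans == 0) or tcp_packet_delayed() shows the ACK
 * echoes a timestamp older than the first retransmission:
 *
 *	undo_marker && (!undo_retrans || tcp_packet_delayed(tp))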
17851da177e4SLinus Torvalds */ 17866687e988SArnaldo Carvalho de Melo DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 17876687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 17886687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 17891da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 17901da177e4SLinus Torvalds else 17911da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); 17921da177e4SLinus Torvalds tp->undo_marker = 0; 17931da177e4SLinus Torvalds } 17941da177e4SLinus Torvalds if (tp->snd_una == tp->high_seq && IsReno(tp)) { 17951da177e4SLinus Torvalds /* Hold old state until something *above* high_seq 17961da177e4SLinus Torvalds * is ACKed. For Reno it is a MUST to prevent false 17971da177e4SLinus Torvalds * fast retransmits (RFC2582). SACK TCP is safe. */ 17981da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 17991da177e4SLinus Torvalds return 1; 18001da177e4SLinus Torvalds } 18016687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 18021da177e4SLinus Torvalds return 0; 18031da177e4SLinus Torvalds } 18041da177e4SLinus Torvalds 18051da177e4SLinus Torvalds /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 18061da177e4SLinus Torvalds static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) 18071da177e4SLinus Torvalds { 18081da177e4SLinus Torvalds if (tp->undo_marker && !tp->undo_retrans) { 18091da177e4SLinus Torvalds DBGUNDO(sk, tp, "D-SACK"); 18106687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 18111da177e4SLinus Torvalds tp->undo_marker = 0; 18121da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO); 18131da177e4SLinus Torvalds } 18141da177e4SLinus Torvalds } 18151da177e4SLinus Torvalds 18161da177e4SLinus Torvalds /* Undo during fast recovery after partial ACK. */ 18171da177e4SLinus Torvalds 18181da177e4SLinus Torvalds static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, 18191da177e4SLinus Torvalds int acked) 18201da177e4SLinus Torvalds { 18211da177e4SLinus Torvalds /* Partial ACK arrived. Force Hoe's retransmit. */ 18221da177e4SLinus Torvalds int failed = IsReno(tp) || tp->fackets_out>tp->reordering; 18231da177e4SLinus Torvalds 18241da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 18251da177e4SLinus Torvalds /* Plain luck! Hole is filled with a delayed 18261da177e4SLinus Torvalds * packet, rather than with a retransmit. 18271da177e4SLinus Torvalds */ 18281da177e4SLinus Torvalds if (tp->retrans_out == 0) 18291da177e4SLinus Torvalds tp->retrans_stamp = 0; 18301da177e4SLinus Torvalds 18316687e988SArnaldo Carvalho de Melo tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 18321da177e4SLinus Torvalds 18331da177e4SLinus Torvalds DBGUNDO(sk, tp, "Hoe"); 18346687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 0); 18351da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO); 18361da177e4SLinus Torvalds 18371da177e4SLinus Torvalds /* So... Do not make Hoe's retransmit yet. 18381da177e4SLinus Torvalds * If the first packet was delayed, the rest 18391da177e4SLinus Torvalds * are most probably delayed as well. 18401da177e4SLinus Torvalds */ 18411da177e4SLinus Torvalds failed = 0; 18421da177e4SLinus Torvalds } 18431da177e4SLinus Torvalds return failed; 18441da177e4SLinus Torvalds } 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds /* Undo during loss recovery after partial ACK.
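 *
 * Added note: unlike the D-SACK and partial-ACK undos above, this one
 * also has to strip the LOST tags planted by tcp_enter_loss(): lost_out
 * drops back to 0, left_out collapses to sacked_out and tcp_undo_cwr()
 * restores cwnd and ssthresh, because timestamps or D-SACKs have shown
 * that the original transmissions were delivered after all.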
*/ 18471da177e4SLinus Torvalds static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) 18481da177e4SLinus Torvalds { 18491da177e4SLinus Torvalds if (tcp_may_undo(tp)) { 18501da177e4SLinus Torvalds struct sk_buff *skb; 18511da177e4SLinus Torvalds sk_stream_for_retrans_queue(skb, sk) { 18521da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 18531da177e4SLinus Torvalds } 18546a438bbeSStephen Hemminger 18556a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 18566a438bbeSStephen Hemminger 18571da177e4SLinus Torvalds DBGUNDO(sk, tp, "partial loss"); 18581da177e4SLinus Torvalds tp->lost_out = 0; 18591da177e4SLinus Torvalds tp->left_out = tp->sacked_out; 18606687e988SArnaldo Carvalho de Melo tcp_undo_cwr(sk, 1); 18611da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1862463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_retransmits = 0; 18631da177e4SLinus Torvalds tp->undo_marker = 0; 18641da177e4SLinus Torvalds if (!IsReno(tp)) 18656687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 18661da177e4SLinus Torvalds return 1; 18671da177e4SLinus Torvalds } 18681da177e4SLinus Torvalds return 0; 18691da177e4SLinus Torvalds } 18701da177e4SLinus Torvalds 18716687e988SArnaldo Carvalho de Melo static inline void tcp_complete_cwr(struct sock *sk) 18721da177e4SLinus Torvalds { 18736687e988SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 18741da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 18751da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 18766687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 18771da177e4SLinus Torvalds } 18781da177e4SLinus Torvalds 18791da177e4SLinus Torvalds static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) 18801da177e4SLinus Torvalds { 18811da177e4SLinus Torvalds tp->left_out = tp->sacked_out; 18821da177e4SLinus Torvalds 18831da177e4SLinus Torvalds if (tp->retrans_out == 0) 18841da177e4SLinus Torvalds tp->retrans_stamp = 0; 18851da177e4SLinus Torvalds 18861da177e4SLinus Torvalds if (flag&FLAG_ECE) 18876687e988SArnaldo Carvalho de Melo tcp_enter_cwr(sk); 18881da177e4SLinus Torvalds 18896687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 18901da177e4SLinus Torvalds int state = TCP_CA_Open; 18911da177e4SLinus Torvalds 18921da177e4SLinus Torvalds if (tp->left_out || tp->retrans_out || tp->undo_marker) 18931da177e4SLinus Torvalds state = TCP_CA_Disorder; 18941da177e4SLinus Torvalds 18956687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state != state) { 18966687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, state); 18971da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 18981da177e4SLinus Torvalds } 18991da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 19001da177e4SLinus Torvalds } else { 19016687e988SArnaldo Carvalho de Melo tcp_cwnd_down(sk); 19021da177e4SLinus Torvalds } 19031da177e4SLinus Torvalds } 19041da177e4SLinus Torvalds 19055d424d5aSJohn Heffner static void tcp_mtup_probe_failed(struct sock *sk) 19065d424d5aSJohn Heffner { 19075d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 19085d424d5aSJohn Heffner 19095d424d5aSJohn Heffner icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 19105d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 19115d424d5aSJohn Heffner } 19125d424d5aSJohn Heffner 19135d424d5aSJohn Heffner static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb) 19145d424d5aSJohn Heffner { 19155d424d5aSJohn Heffner struct tcp_sock 
*tp = tcp_sk(sk); 19165d424d5aSJohn Heffner struct inet_connection_sock *icsk = inet_csk(sk); 19175d424d5aSJohn Heffner 19185d424d5aSJohn Heffner /* FIXME: breaks with very large cwnd */ 19195d424d5aSJohn Heffner tp->prior_ssthresh = tcp_current_ssthresh(sk); 19205d424d5aSJohn Heffner tp->snd_cwnd = tp->snd_cwnd * 19215d424d5aSJohn Heffner tcp_mss_to_mtu(sk, tp->mss_cache) / 19225d424d5aSJohn Heffner icsk->icsk_mtup.probe_size; 19235d424d5aSJohn Heffner tp->snd_cwnd_cnt = 0; 19245d424d5aSJohn Heffner tp->snd_cwnd_stamp = tcp_time_stamp; 19255d424d5aSJohn Heffner tp->rcv_ssthresh = tcp_current_ssthresh(sk); 19265d424d5aSJohn Heffner 19275d424d5aSJohn Heffner icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 19285d424d5aSJohn Heffner icsk->icsk_mtup.probe_size = 0; 19295d424d5aSJohn Heffner tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 19305d424d5aSJohn Heffner } 19315d424d5aSJohn Heffner 19325d424d5aSJohn Heffner 19331da177e4SLinus Torvalds /* Process an event, which can update packets-in-flight not trivially. 19341da177e4SLinus Torvalds * Main goal of this function is to calculate new estimate for left_out, 19351da177e4SLinus Torvalds * taking into account both packets sitting in receiver's buffer and 19361da177e4SLinus Torvalds * packets lost by network. 19371da177e4SLinus Torvalds * 19381da177e4SLinus Torvalds * Besides that it does CWND reduction, when packet loss is detected 19391da177e4SLinus Torvalds * and changes state of machine. 19401da177e4SLinus Torvalds * 19411da177e4SLinus Torvalds * It does _not_ decide what to send, it is made in function 19421da177e4SLinus Torvalds * tcp_xmit_retransmit_queue(). 19431da177e4SLinus Torvalds */ 19441da177e4SLinus Torvalds static void 19451da177e4SLinus Torvalds tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, 19461da177e4SLinus Torvalds int prior_packets, int flag) 19471da177e4SLinus Torvalds { 19486687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 19491da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 19501da177e4SLinus Torvalds int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP)); 19511da177e4SLinus Torvalds 19521da177e4SLinus Torvalds /* Some technical things: 19531da177e4SLinus Torvalds * 1. Reno does not count dupacks (sacked_out) automatically. */ 19541da177e4SLinus Torvalds if (!tp->packets_out) 19551da177e4SLinus Torvalds tp->sacked_out = 0; 19561da177e4SLinus Torvalds /* 2. SACK counts snd_fack in packets inaccurately. */ 19571da177e4SLinus Torvalds if (tp->sacked_out == 0) 19581da177e4SLinus Torvalds tp->fackets_out = 0; 19591da177e4SLinus Torvalds 19601da177e4SLinus Torvalds /* Now state machine starts. 19611da177e4SLinus Torvalds * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 19621da177e4SLinus Torvalds if (flag&FLAG_ECE) 19631da177e4SLinus Torvalds tp->prior_ssthresh = 0; 19641da177e4SLinus Torvalds 19651da177e4SLinus Torvalds /* B. In all the states check for reneging SACKs. */ 1966463c84b9SArnaldo Carvalho de Melo if (tp->sacked_out && tcp_check_sack_reneging(sk)) 19671da177e4SLinus Torvalds return; 19681da177e4SLinus Torvalds 19691da177e4SLinus Torvalds /* C. Process data loss notification, provided it is valid. 
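 *
 * Added note on step C below: FLAG_DATA_LOST was set by
 * tcp_sacktag_write_queue() when a SACK block reached beyond high_seq
 * (event "B" of the tag state machine). If snd_una is still below
 * high_seq, we are not in the Open state and fackets_out already exceeds
 * the reordering metric, the head of the queue is marked lost via
 *
 *	tcp_mark_head_lost(sk, tp, fackets_out - reordering, high_seq)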
*/ 19701da177e4SLinus Torvalds if ((flag&FLAG_DATA_LOST) && 19711da177e4SLinus Torvalds before(tp->snd_una, tp->high_seq) && 19726687e988SArnaldo Carvalho de Melo icsk->icsk_ca_state != TCP_CA_Open && 19731da177e4SLinus Torvalds tp->fackets_out > tp->reordering) { 19741da177e4SLinus Torvalds tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq); 19751da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 19781da177e4SLinus Torvalds /* D. Synchronize left_out to current state. */ 19791da177e4SLinus Torvalds tcp_sync_left_out(tp); 19801da177e4SLinus Torvalds 19811da177e4SLinus Torvalds /* E. Check state exit conditions. State can be terminated 19821da177e4SLinus Torvalds * when high_seq is ACKed. */ 19836687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state == TCP_CA_Open) { 19841da177e4SLinus Torvalds if (!sysctl_tcp_frto) 19851da177e4SLinus Torvalds BUG_TRAP(tp->retrans_out == 0); 19861da177e4SLinus Torvalds tp->retrans_stamp = 0; 19871da177e4SLinus Torvalds } else if (!before(tp->snd_una, tp->high_seq)) { 19886687e988SArnaldo Carvalho de Melo switch (icsk->icsk_ca_state) { 19891da177e4SLinus Torvalds case TCP_CA_Loss: 19906687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits = 0; 19911da177e4SLinus Torvalds if (tcp_try_undo_recovery(sk, tp)) 19921da177e4SLinus Torvalds return; 19931da177e4SLinus Torvalds break; 19941da177e4SLinus Torvalds 19951da177e4SLinus Torvalds case TCP_CA_CWR: 19961da177e4SLinus Torvalds /* CWR is to be held something *above* high_seq 19971da177e4SLinus Torvalds * is ACKed for CWR bit to reach receiver. */ 19981da177e4SLinus Torvalds if (tp->snd_una != tp->high_seq) { 19996687e988SArnaldo Carvalho de Melo tcp_complete_cwr(sk); 20006687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 20011da177e4SLinus Torvalds } 20021da177e4SLinus Torvalds break; 20031da177e4SLinus Torvalds 20041da177e4SLinus Torvalds case TCP_CA_Disorder: 20051da177e4SLinus Torvalds tcp_try_undo_dsack(sk, tp); 20061da177e4SLinus Torvalds if (!tp->undo_marker || 20071da177e4SLinus Torvalds /* For SACK case do not Open to allow to undo 20081da177e4SLinus Torvalds * catching for all duplicate ACKs. */ 20091da177e4SLinus Torvalds IsReno(tp) || tp->snd_una != tp->high_seq) { 20101da177e4SLinus Torvalds tp->undo_marker = 0; 20116687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 20121da177e4SLinus Torvalds } 20131da177e4SLinus Torvalds break; 20141da177e4SLinus Torvalds 20151da177e4SLinus Torvalds case TCP_CA_Recovery: 20161da177e4SLinus Torvalds if (IsReno(tp)) 20171da177e4SLinus Torvalds tcp_reset_reno_sack(tp); 20181da177e4SLinus Torvalds if (tcp_try_undo_recovery(sk, tp)) 20191da177e4SLinus Torvalds return; 20206687e988SArnaldo Carvalho de Melo tcp_complete_cwr(sk); 20211da177e4SLinus Torvalds break; 20221da177e4SLinus Torvalds } 20231da177e4SLinus Torvalds } 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds /* F. Process state. 
*/ 20266687e988SArnaldo Carvalho de Melo switch (icsk->icsk_ca_state) { 20271da177e4SLinus Torvalds case TCP_CA_Recovery: 20281da177e4SLinus Torvalds if (prior_snd_una == tp->snd_una) { 20291da177e4SLinus Torvalds if (IsReno(tp) && is_dupack) 20306687e988SArnaldo Carvalho de Melo tcp_add_reno_sack(sk); 20311da177e4SLinus Torvalds } else { 20321da177e4SLinus Torvalds int acked = prior_packets - tp->packets_out; 20331da177e4SLinus Torvalds if (IsReno(tp)) 20341da177e4SLinus Torvalds tcp_remove_reno_sacks(sk, tp, acked); 20351da177e4SLinus Torvalds is_dupack = tcp_try_undo_partial(sk, tp, acked); 20361da177e4SLinus Torvalds } 20371da177e4SLinus Torvalds break; 20381da177e4SLinus Torvalds case TCP_CA_Loss: 20391da177e4SLinus Torvalds if (flag&FLAG_DATA_ACKED) 20406687e988SArnaldo Carvalho de Melo icsk->icsk_retransmits = 0; 20411da177e4SLinus Torvalds if (!tcp_try_undo_loss(sk, tp)) { 20421da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 20431da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 20441da177e4SLinus Torvalds return; 20451da177e4SLinus Torvalds } 20466687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state != TCP_CA_Open) 20471da177e4SLinus Torvalds return; 20481da177e4SLinus Torvalds /* Loss is undone; fall through to processing in Open state. */ 20491da177e4SLinus Torvalds default: 20501da177e4SLinus Torvalds if (IsReno(tp)) { 20511da177e4SLinus Torvalds if (tp->snd_una != prior_snd_una) 20521da177e4SLinus Torvalds tcp_reset_reno_sack(tp); 20531da177e4SLinus Torvalds if (is_dupack) 20546687e988SArnaldo Carvalho de Melo tcp_add_reno_sack(sk); 20551da177e4SLinus Torvalds } 20561da177e4SLinus Torvalds 20576687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state == TCP_CA_Disorder) 20581da177e4SLinus Torvalds tcp_try_undo_dsack(sk, tp); 20591da177e4SLinus Torvalds 20601da177e4SLinus Torvalds if (!tcp_time_to_recover(sk, tp)) { 20611da177e4SLinus Torvalds tcp_try_to_open(sk, tp, flag); 20621da177e4SLinus Torvalds return; 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds 20655d424d5aSJohn Heffner /* MTU probe failure: don't reduce cwnd */ 20665d424d5aSJohn Heffner if (icsk->icsk_ca_state < TCP_CA_CWR && 20675d424d5aSJohn Heffner icsk->icsk_mtup.probe_size && 20680e7b1368SJohn Heffner tp->snd_una == tp->mtu_probe.probe_seq_start) { 20695d424d5aSJohn Heffner tcp_mtup_probe_failed(sk); 20705d424d5aSJohn Heffner /* Restores the reduction we did in tcp_mtup_probe() */ 20715d424d5aSJohn Heffner tp->snd_cwnd++; 20725d424d5aSJohn Heffner tcp_simple_retransmit(sk); 20735d424d5aSJohn Heffner return; 20745d424d5aSJohn Heffner } 20755d424d5aSJohn Heffner 20761da177e4SLinus Torvalds /* Otherwise enter Recovery state */ 20771da177e4SLinus Torvalds 20781da177e4SLinus Torvalds if (IsReno(tp)) 20791da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); 20801da177e4SLinus Torvalds else 20811da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); 20821da177e4SLinus Torvalds 20831da177e4SLinus Torvalds tp->high_seq = tp->snd_nxt; 20841da177e4SLinus Torvalds tp->prior_ssthresh = 0; 20851da177e4SLinus Torvalds tp->undo_marker = tp->snd_una; 20861da177e4SLinus Torvalds tp->undo_retrans = tp->retrans_out; 20871da177e4SLinus Torvalds 20886687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_state < TCP_CA_CWR) { 20891da177e4SLinus Torvalds if (!(flag&FLAG_ECE)) 20906687e988SArnaldo Carvalho de Melo tp->prior_ssthresh = tcp_current_ssthresh(sk); 20916687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 20921da177e4SLinus Torvalds 
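/* If ECN is negotiated, also queue a CWR: the next outgoing data segment will carry the CWR bit, telling the receiver that the congestion window has been reduced. */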
TCP_ECN_queue_cwr(tp); 20931da177e4SLinus Torvalds } 20941da177e4SLinus Torvalds 20959772efb9SStephen Hemminger tp->bytes_acked = 0; 20961da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 20976687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Recovery); 20981da177e4SLinus Torvalds } 20991da177e4SLinus Torvalds 21001da177e4SLinus Torvalds if (is_dupack || tcp_head_timedout(sk, tp)) 21011da177e4SLinus Torvalds tcp_update_scoreboard(sk, tp); 21026687e988SArnaldo Carvalho de Melo tcp_cwnd_down(sk); 21031da177e4SLinus Torvalds tcp_xmit_retransmit_queue(sk); 21041da177e4SLinus Torvalds } 21051da177e4SLinus Torvalds 21061da177e4SLinus Torvalds /* Read draft-ietf-tcplw-high-performance before mucking 2107caa20d9aSStephen Hemminger * with this code. (Supersedes RFC1323) 21081da177e4SLinus Torvalds */ 21092d2abbabSStephen Hemminger static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 21101da177e4SLinus Torvalds { 21111da177e4SLinus Torvalds /* RTTM Rule: A TSecr value received in a segment is used to 21121da177e4SLinus Torvalds * update the averaged RTT measurement only if the segment 21131da177e4SLinus Torvalds * acknowledges some new data, i.e., only if it advances the 21141da177e4SLinus Torvalds * left edge of the send window. 21151da177e4SLinus Torvalds * 21161da177e4SLinus Torvalds * See draft-ietf-tcplw-high-performance-00, section 3.3. 21171da177e4SLinus Torvalds * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 21181da177e4SLinus Torvalds * 21191da177e4SLinus Torvalds * Changed: reset backoff as soon as we see the first valid sample. 2120caa20d9aSStephen Hemminger * If we do not, we get strongly overestimated rto. With timestamps 21211da177e4SLinus Torvalds * samples are accepted even from very old segments: f.e., when rtt=1 21221da177e4SLinus Torvalds * increases to 8, we retransmit 5 times and after 8 seconds delayed 21231da177e4SLinus Torvalds * answer arrives rto becomes 120 seconds! If at least one of segments 21241da177e4SLinus Torvalds * in window is lost... Voila. --ANK (010210) 21251da177e4SLinus Torvalds */ 2126463c84b9SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 2127463c84b9SArnaldo Carvalho de Melo const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 21282d2abbabSStephen Hemminger tcp_rtt_estimator(sk, seq_rtt); 2129463c84b9SArnaldo Carvalho de Melo tcp_set_rto(sk); 2130463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_backoff = 0; 2131463c84b9SArnaldo Carvalho de Melo tcp_bound_rto(sk); 21321da177e4SLinus Torvalds } 21331da177e4SLinus Torvalds 21342d2abbabSStephen Hemminger static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 21351da177e4SLinus Torvalds { 21361da177e4SLinus Torvalds /* We don't have a timestamp. Can only use 21371da177e4SLinus Torvalds * packets that are not retransmitted to determine 21381da177e4SLinus Torvalds * rtt estimates. Also, we must not reset the 21391da177e4SLinus Torvalds * backoff for rto until we get a non-retransmitted 21401da177e4SLinus Torvalds * packet. This allows us to deal with a situation 21411da177e4SLinus Torvalds * where the network delay has increased suddenly. 21421da177e4SLinus Torvalds * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 
21431da177e4SLinus Torvalds */ 21441da177e4SLinus Torvalds 21451da177e4SLinus Torvalds if (flag & FLAG_RETRANS_DATA_ACKED) 21461da177e4SLinus Torvalds return; 21471da177e4SLinus Torvalds 21482d2abbabSStephen Hemminger tcp_rtt_estimator(sk, seq_rtt); 2149463c84b9SArnaldo Carvalho de Melo tcp_set_rto(sk); 2150463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_backoff = 0; 2151463c84b9SArnaldo Carvalho de Melo tcp_bound_rto(sk); 21521da177e4SLinus Torvalds } 21531da177e4SLinus Torvalds 2154463c84b9SArnaldo Carvalho de Melo static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 21552d2abbabSStephen Hemminger const s32 seq_rtt) 21561da177e4SLinus Torvalds { 2157463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 21581da177e4SLinus Torvalds /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 21591da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 21602d2abbabSStephen Hemminger tcp_ack_saw_tstamp(sk, flag); 21611da177e4SLinus Torvalds else if (seq_rtt >= 0) 21622d2abbabSStephen Hemminger tcp_ack_no_tstamp(sk, seq_rtt, flag); 21631da177e4SLinus Torvalds } 21641da177e4SLinus Torvalds 216540efc6faSStephen Hemminger static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, 2166317a76f9SStephen Hemminger u32 in_flight, int good) 21671da177e4SLinus Torvalds { 21686687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 21696687e988SArnaldo Carvalho de Melo icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good); 21706687e988SArnaldo Carvalho de Melo tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 21711da177e4SLinus Torvalds } 21721da177e4SLinus Torvalds 21731da177e4SLinus Torvalds /* Restart timer after forward progress on connection. 21741da177e4SLinus Torvalds * RFC2988 recommends to restart timer to now+rto. 21751da177e4SLinus Torvalds */ 21761da177e4SLinus Torvalds 217740efc6faSStephen Hemminger static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) 21781da177e4SLinus Torvalds { 21791da177e4SLinus Torvalds if (!tp->packets_out) { 2180463c84b9SArnaldo Carvalho de Melo inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 21811da177e4SLinus Torvalds } else { 21823f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 21831da177e4SLinus Torvalds } 21841da177e4SLinus Torvalds } 21851da177e4SLinus Torvalds 21861da177e4SLinus Torvalds static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, 21871da177e4SLinus Torvalds __u32 now, __s32 *seq_rtt) 21881da177e4SLinus Torvalds { 21891da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 21901da177e4SLinus Torvalds struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 21911da177e4SLinus Torvalds __u32 seq = tp->snd_una; 21921da177e4SLinus Torvalds __u32 packets_acked; 21931da177e4SLinus Torvalds int acked = 0; 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds /* If we get here, the whole TSO packet has not been 21961da177e4SLinus Torvalds * acked. 
21971da177e4SLinus Torvalds */ 21981da177e4SLinus Torvalds BUG_ON(!after(scb->end_seq, seq)); 21991da177e4SLinus Torvalds 22001da177e4SLinus Torvalds packets_acked = tcp_skb_pcount(skb); 22011da177e4SLinus Torvalds if (tcp_trim_head(sk, skb, seq - scb->seq)) 22021da177e4SLinus Torvalds return 0; 22031da177e4SLinus Torvalds packets_acked -= tcp_skb_pcount(skb); 22041da177e4SLinus Torvalds 22051da177e4SLinus Torvalds if (packets_acked) { 22061da177e4SLinus Torvalds __u8 sacked = scb->sacked; 22071da177e4SLinus Torvalds 22081da177e4SLinus Torvalds acked |= FLAG_DATA_ACKED; 22091da177e4SLinus Torvalds if (sacked) { 22101da177e4SLinus Torvalds if (sacked & TCPCB_RETRANS) { 22111da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_RETRANS) 22121da177e4SLinus Torvalds tp->retrans_out -= packets_acked; 22131da177e4SLinus Torvalds acked |= FLAG_RETRANS_DATA_ACKED; 22141da177e4SLinus Torvalds *seq_rtt = -1; 22151da177e4SLinus Torvalds } else if (*seq_rtt < 0) 22161da177e4SLinus Torvalds *seq_rtt = now - scb->when; 22171da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_ACKED) 22181da177e4SLinus Torvalds tp->sacked_out -= packets_acked; 22191da177e4SLinus Torvalds if (sacked & TCPCB_LOST) 22201da177e4SLinus Torvalds tp->lost_out -= packets_acked; 22211da177e4SLinus Torvalds if (sacked & TCPCB_URG) { 22221da177e4SLinus Torvalds if (tp->urg_mode && 22231da177e4SLinus Torvalds !before(seq, tp->snd_up)) 22241da177e4SLinus Torvalds tp->urg_mode = 0; 22251da177e4SLinus Torvalds } 22261da177e4SLinus Torvalds } else if (*seq_rtt < 0) 22271da177e4SLinus Torvalds *seq_rtt = now - scb->when; 22281da177e4SLinus Torvalds 22291da177e4SLinus Torvalds if (tp->fackets_out) { 22301da177e4SLinus Torvalds __u32 dval = min(tp->fackets_out, packets_acked); 22311da177e4SLinus Torvalds tp->fackets_out -= dval; 22321da177e4SLinus Torvalds } 22331da177e4SLinus Torvalds tp->packets_out -= packets_acked; 22341da177e4SLinus Torvalds 22351da177e4SLinus Torvalds BUG_ON(tcp_skb_pcount(skb) == 0); 22361da177e4SLinus Torvalds BUG_ON(!before(scb->seq, scb->end_seq)); 22371da177e4SLinus Torvalds } 22381da177e4SLinus Torvalds 22391da177e4SLinus Torvalds return acked; 22401da177e4SLinus Torvalds } 22411da177e4SLinus Torvalds 22428ea333ebSJohn Heffner static u32 tcp_usrtt(struct timeval *tv) 22432d2abbabSStephen Hemminger { 22448ea333ebSJohn Heffner struct timeval now; 22452d2abbabSStephen Hemminger 22462d2abbabSStephen Hemminger do_gettimeofday(&now); 22478ea333ebSJohn Heffner return (now.tv_sec - tv->tv_sec) * 1000000 + (now.tv_usec - tv->tv_usec); 22482d2abbabSStephen Hemminger } 22491da177e4SLinus Torvalds 22501da177e4SLinus Torvalds /* Remove acknowledged frames from the retransmission queue. */ 22512d2abbabSStephen Hemminger static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) 22521da177e4SLinus Torvalds { 22531da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 22542d2abbabSStephen Hemminger const struct inet_connection_sock *icsk = inet_csk(sk); 22551da177e4SLinus Torvalds struct sk_buff *skb; 22561da177e4SLinus Torvalds __u32 now = tcp_time_stamp; 22571da177e4SLinus Torvalds int acked = 0; 22581da177e4SLinus Torvalds __s32 seq_rtt = -1; 2259317a76f9SStephen Hemminger u32 pkts_acked = 0; 22602d2abbabSStephen Hemminger void (*rtt_sample)(struct sock *sk, u32 usrtt) 22612d2abbabSStephen Hemminger = icsk->icsk_ca_ops->rtt_sample; 226280246ab3SDavid S. 
Miller struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; 22631da177e4SLinus Torvalds 22641da177e4SLinus Torvalds while ((skb = skb_peek(&sk->sk_write_queue)) && 22651da177e4SLinus Torvalds skb != sk->sk_send_head) { 22661da177e4SLinus Torvalds struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 22671da177e4SLinus Torvalds __u8 sacked = scb->sacked; 22681da177e4SLinus Torvalds 22691da177e4SLinus Torvalds /* If our packet is before the ack sequence we can 22701da177e4SLinus Torvalds * discard it as it's confirmed to have arrived at 22711da177e4SLinus Torvalds * the other end. 22721da177e4SLinus Torvalds */ 22731da177e4SLinus Torvalds if (after(scb->end_seq, tp->snd_una)) { 2274cb83199aSDavid S. Miller if (tcp_skb_pcount(skb) > 1 && 2275cb83199aSDavid S. Miller after(tp->snd_una, scb->seq)) 22761da177e4SLinus Torvalds acked |= tcp_tso_acked(sk, skb, 22771da177e4SLinus Torvalds now, &seq_rtt); 22781da177e4SLinus Torvalds break; 22791da177e4SLinus Torvalds } 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds /* Initial outgoing SYN's get put onto the write_queue 22821da177e4SLinus Torvalds * just like anything else we transmit. It is not 22831da177e4SLinus Torvalds * true data, and if we misinform our callers that 22841da177e4SLinus Torvalds * this ACK acks real data, we will erroneously exit 22851da177e4SLinus Torvalds * connection startup slow start one packet too 22861da177e4SLinus Torvalds * quickly. This is severely frowned upon behavior. 22871da177e4SLinus Torvalds */ 22881da177e4SLinus Torvalds if (!(scb->flags & TCPCB_FLAG_SYN)) { 22891da177e4SLinus Torvalds acked |= FLAG_DATA_ACKED; 2290317a76f9SStephen Hemminger ++pkts_acked; 22911da177e4SLinus Torvalds } else { 22921da177e4SLinus Torvalds acked |= FLAG_SYN_ACKED; 22931da177e4SLinus Torvalds tp->retrans_stamp = 0; 22941da177e4SLinus Torvalds } 22951da177e4SLinus Torvalds 22965d424d5aSJohn Heffner /* MTU probing checks */ 22975d424d5aSJohn Heffner if (icsk->icsk_mtup.probe_size) { 22980e7b1368SJohn Heffner if (!after(tp->mtu_probe.probe_seq_end, TCP_SKB_CB(skb)->end_seq)) { 22995d424d5aSJohn Heffner tcp_mtup_probe_success(sk, skb); 23005d424d5aSJohn Heffner } 23015d424d5aSJohn Heffner } 23025d424d5aSJohn Heffner 23031da177e4SLinus Torvalds if (sacked) { 23041da177e4SLinus Torvalds if (sacked & TCPCB_RETRANS) { 23051da177e4SLinus Torvalds if(sacked & TCPCB_SACKED_RETRANS) 23061da177e4SLinus Torvalds tp->retrans_out -= tcp_skb_pcount(skb); 23071da177e4SLinus Torvalds acked |= FLAG_RETRANS_DATA_ACKED; 23081da177e4SLinus Torvalds seq_rtt = -1; 23092d2abbabSStephen Hemminger } else if (seq_rtt < 0) { 23101da177e4SLinus Torvalds seq_rtt = now - scb->when; 23118ea333ebSJohn Heffner skb_get_timestamp(skb, &tv); 2312a61bbcf2SPatrick McHardy } 23131da177e4SLinus Torvalds if (sacked & TCPCB_SACKED_ACKED) 23141da177e4SLinus Torvalds tp->sacked_out -= tcp_skb_pcount(skb); 23151da177e4SLinus Torvalds if (sacked & TCPCB_LOST) 23161da177e4SLinus Torvalds tp->lost_out -= tcp_skb_pcount(skb); 23171da177e4SLinus Torvalds if (sacked & TCPCB_URG) { 23181da177e4SLinus Torvalds if (tp->urg_mode && 23191da177e4SLinus Torvalds !before(scb->end_seq, tp->snd_up)) 23201da177e4SLinus Torvalds tp->urg_mode = 0; 23211da177e4SLinus Torvalds } 23222d2abbabSStephen Hemminger } else if (seq_rtt < 0) { 23231da177e4SLinus Torvalds seq_rtt = now - scb->when; 23248ea333ebSJohn Heffner skb_get_timestamp(skb, &tv); 23252d2abbabSStephen Hemminger } 23261da177e4SLinus Torvalds tcp_dec_pcount_approx(&tp->fackets_out, skb); 23271da177e4SLinus Torvalds tcp_packets_out_dec(tp, skb); 
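/* This skb is cumulatively acknowledged in full: unlink it from the write queue, free it, and drop any cached retransmit hints that could still point at it. */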
23288728b834SDavid S. Miller __skb_unlink(skb, &sk->sk_write_queue); 23291da177e4SLinus Torvalds sk_stream_free_skb(sk, skb); 23306a438bbeSStephen Hemminger clear_all_retrans_hints(tp); 23311da177e4SLinus Torvalds } 23321da177e4SLinus Torvalds 23331da177e4SLinus Torvalds if (acked&FLAG_ACKED) { 23342d2abbabSStephen Hemminger tcp_ack_update_rtt(sk, acked, seq_rtt); 23351da177e4SLinus Torvalds tcp_ack_packets_out(sk, tp); 23368ea333ebSJohn Heffner if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED)) 23378ea333ebSJohn Heffner (*rtt_sample)(sk, tcp_usrtt(&tv)); 2338317a76f9SStephen Hemminger 23396687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_ops->pkts_acked) 23406687e988SArnaldo Carvalho de Melo icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked); 23411da177e4SLinus Torvalds } 23421da177e4SLinus Torvalds 23431da177e4SLinus Torvalds #if FASTRETRANS_DEBUG > 0 23441da177e4SLinus Torvalds BUG_TRAP((int)tp->sacked_out >= 0); 23451da177e4SLinus Torvalds BUG_TRAP((int)tp->lost_out >= 0); 23461da177e4SLinus Torvalds BUG_TRAP((int)tp->retrans_out >= 0); 23471da177e4SLinus Torvalds if (!tp->packets_out && tp->rx_opt.sack_ok) { 23486687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 23491da177e4SLinus Torvalds if (tp->lost_out) { 23501da177e4SLinus Torvalds printk(KERN_DEBUG "Leak l=%u %d\n", 23516687e988SArnaldo Carvalho de Melo tp->lost_out, icsk->icsk_ca_state); 23521da177e4SLinus Torvalds tp->lost_out = 0; 23531da177e4SLinus Torvalds } 23541da177e4SLinus Torvalds if (tp->sacked_out) { 23551da177e4SLinus Torvalds printk(KERN_DEBUG "Leak s=%u %d\n", 23566687e988SArnaldo Carvalho de Melo tp->sacked_out, icsk->icsk_ca_state); 23571da177e4SLinus Torvalds tp->sacked_out = 0; 23581da177e4SLinus Torvalds } 23591da177e4SLinus Torvalds if (tp->retrans_out) { 23601da177e4SLinus Torvalds printk(KERN_DEBUG "Leak r=%u %d\n", 23616687e988SArnaldo Carvalho de Melo tp->retrans_out, icsk->icsk_ca_state); 23621da177e4SLinus Torvalds tp->retrans_out = 0; 23631da177e4SLinus Torvalds } 23641da177e4SLinus Torvalds } 23651da177e4SLinus Torvalds #endif 23661da177e4SLinus Torvalds *seq_rtt_p = seq_rtt; 23671da177e4SLinus Torvalds return acked; 23681da177e4SLinus Torvalds } 23691da177e4SLinus Torvalds 23701da177e4SLinus Torvalds static void tcp_ack_probe(struct sock *sk) 23711da177e4SLinus Torvalds { 2372463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 2373463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 23741da177e4SLinus Torvalds 23751da177e4SLinus Torvalds /* Was it a usable window open? */ 23761da177e4SLinus Torvalds 23771da177e4SLinus Torvalds if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, 23781da177e4SLinus Torvalds tp->snd_una + tp->snd_wnd)) { 2379463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 2380463c84b9SArnaldo Carvalho de Melo inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 23811da177e4SLinus Torvalds /* Socket must be waked up by subsequent tcp_data_snd_check(). 23821da177e4SLinus Torvalds * This function is not for random using! 
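 * In this file it is reached only from the no_queue path of tcp_ack(), and only when sk->sk_send_head is set.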
23831da177e4SLinus Torvalds */ 23841da177e4SLinus Torvalds } else { 2385463c84b9SArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 23863f421baaSArnaldo Carvalho de Melo min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 23873f421baaSArnaldo Carvalho de Melo TCP_RTO_MAX); 23881da177e4SLinus Torvalds } 23891da177e4SLinus Torvalds } 23901da177e4SLinus Torvalds 23916687e988SArnaldo Carvalho de Melo static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 23921da177e4SLinus Torvalds { 23931da177e4SLinus Torvalds return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 23946687e988SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ca_state != TCP_CA_Open); 23951da177e4SLinus Torvalds } 23961da177e4SLinus Torvalds 23976687e988SArnaldo Carvalho de Melo static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 23981da177e4SLinus Torvalds { 23996687e988SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 24001da177e4SLinus Torvalds return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 24016687e988SArnaldo Carvalho de Melo !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 24021da177e4SLinus Torvalds } 24031da177e4SLinus Torvalds 24041da177e4SLinus Torvalds /* Check that window update is acceptable. 24051da177e4SLinus Torvalds * The function assumes that snd_una<=ack<=snd_next. 24061da177e4SLinus Torvalds */ 2407463c84b9SArnaldo Carvalho de Melo static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, 2408463c84b9SArnaldo Carvalho de Melo const u32 ack_seq, const u32 nwin) 24091da177e4SLinus Torvalds { 24101da177e4SLinus Torvalds return (after(ack, tp->snd_una) || 24111da177e4SLinus Torvalds after(ack_seq, tp->snd_wl1) || 24121da177e4SLinus Torvalds (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd)); 24131da177e4SLinus Torvalds } 24141da177e4SLinus Torvalds 24151da177e4SLinus Torvalds /* Update our send window. 24161da177e4SLinus Torvalds * 24171da177e4SLinus Torvalds * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 24181da177e4SLinus Torvalds * and in FreeBSD. NetBSD's one is even worse.) is wrong. 24191da177e4SLinus Torvalds */ 24201da177e4SLinus Torvalds static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, 24211da177e4SLinus Torvalds struct sk_buff *skb, u32 ack, u32 ack_seq) 24221da177e4SLinus Torvalds { 24231da177e4SLinus Torvalds int flag = 0; 24241da177e4SLinus Torvalds u32 nwin = ntohs(skb->h.th->window); 24251da177e4SLinus Torvalds 24261da177e4SLinus Torvalds if (likely(!skb->h.th->syn)) 24271da177e4SLinus Torvalds nwin <<= tp->rx_opt.snd_wscale; 24281da177e4SLinus Torvalds 24291da177e4SLinus Torvalds if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 24301da177e4SLinus Torvalds flag |= FLAG_WIN_UPDATE; 24311da177e4SLinus Torvalds tcp_update_wl(tp, ack, ack_seq); 24321da177e4SLinus Torvalds 24331da177e4SLinus Torvalds if (tp->snd_wnd != nwin) { 24341da177e4SLinus Torvalds tp->snd_wnd = nwin; 24351da177e4SLinus Torvalds 24361da177e4SLinus Torvalds /* Note, it is the only place, where 24371da177e4SLinus Torvalds * fast path is recovered for sending TCP. 
24381da177e4SLinus Torvalds */ 24392ad41065SHerbert Xu tp->pred_flags = 0; 24401da177e4SLinus Torvalds tcp_fast_path_check(sk, tp); 24411da177e4SLinus Torvalds 24421da177e4SLinus Torvalds if (nwin > tp->max_window) { 24431da177e4SLinus Torvalds tp->max_window = nwin; 2444d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 24451da177e4SLinus Torvalds } 24461da177e4SLinus Torvalds } 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds tp->snd_una = ack; 24501da177e4SLinus Torvalds 24511da177e4SLinus Torvalds return flag; 24521da177e4SLinus Torvalds } 24531da177e4SLinus Torvalds 24541da177e4SLinus Torvalds static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) 24551da177e4SLinus Torvalds { 24561da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 24571da177e4SLinus Torvalds 24581da177e4SLinus Torvalds tcp_sync_left_out(tp); 24591da177e4SLinus Torvalds 24601da177e4SLinus Torvalds if (tp->snd_una == prior_snd_una || 24611da177e4SLinus Torvalds !before(tp->snd_una, tp->frto_highmark)) { 24621da177e4SLinus Torvalds /* RTO was caused by loss, start retransmitting in 24631da177e4SLinus Torvalds * go-back-N slow start 24641da177e4SLinus Torvalds */ 24651da177e4SLinus Torvalds tcp_enter_frto_loss(sk); 24661da177e4SLinus Torvalds return; 24671da177e4SLinus Torvalds } 24681da177e4SLinus Torvalds 24691da177e4SLinus Torvalds if (tp->frto_counter == 1) { 24701da177e4SLinus Torvalds /* First ACK after RTO advances the window: allow two new 24711da177e4SLinus Torvalds * segments out. 24721da177e4SLinus Torvalds */ 24731da177e4SLinus Torvalds tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; 24741da177e4SLinus Torvalds } else { 24751da177e4SLinus Torvalds /* Also the second ACK after RTO advances the window. 24761da177e4SLinus Torvalds * The RTO was likely spurious. Reduce cwnd and continue 24771da177e4SLinus Torvalds * in congestion avoidance 24781da177e4SLinus Torvalds */ 24791da177e4SLinus Torvalds tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 24801da177e4SLinus Torvalds tcp_moderate_cwnd(tp); 24811da177e4SLinus Torvalds } 24821da177e4SLinus Torvalds 24831da177e4SLinus Torvalds /* F-RTO affects on two new ACKs following RTO. 2484caa20d9aSStephen Hemminger * At latest on third ACK the TCP behavior is back to normal. 24851da177e4SLinus Torvalds */ 24861da177e4SLinus Torvalds tp->frto_counter = (tp->frto_counter + 1) % 3; 24871da177e4SLinus Torvalds } 24881da177e4SLinus Torvalds 24891da177e4SLinus Torvalds /* This routine deals with incoming acks, but not outgoing ones. */ 24901da177e4SLinus Torvalds static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 24911da177e4SLinus Torvalds { 24926687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 24931da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 24941da177e4SLinus Torvalds u32 prior_snd_una = tp->snd_una; 24951da177e4SLinus Torvalds u32 ack_seq = TCP_SKB_CB(skb)->seq; 24961da177e4SLinus Torvalds u32 ack = TCP_SKB_CB(skb)->ack_seq; 24971da177e4SLinus Torvalds u32 prior_in_flight; 24981da177e4SLinus Torvalds s32 seq_rtt; 24991da177e4SLinus Torvalds int prior_packets; 25001da177e4SLinus Torvalds 25011da177e4SLinus Torvalds /* If the ack is newer than sent or older than previous acks 25021da177e4SLinus Torvalds * then we can probably ignore it. 
25031da177e4SLinus Torvalds */ 25041da177e4SLinus Torvalds if (after(ack, tp->snd_nxt)) 25051da177e4SLinus Torvalds goto uninteresting_ack; 25061da177e4SLinus Torvalds 25071da177e4SLinus Torvalds if (before(ack, prior_snd_una)) 25081da177e4SLinus Torvalds goto old_ack; 25091da177e4SLinus Torvalds 25103fdf3f0cSDaikichi Osuga if (sysctl_tcp_abc) { 25113fdf3f0cSDaikichi Osuga if (icsk->icsk_ca_state < TCP_CA_CWR) 25129772efb9SStephen Hemminger tp->bytes_acked += ack - prior_snd_una; 25133fdf3f0cSDaikichi Osuga else if (icsk->icsk_ca_state == TCP_CA_Loss) 25143fdf3f0cSDaikichi Osuga /* we assume just one segment left network */ 25153fdf3f0cSDaikichi Osuga tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache); 25163fdf3f0cSDaikichi Osuga } 25179772efb9SStephen Hemminger 25181da177e4SLinus Torvalds if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 25191da177e4SLinus Torvalds /* Window is constant, pure forward advance. 25201da177e4SLinus Torvalds * No more checks are required. 25211da177e4SLinus Torvalds * Note, we use the fact that SND.UNA>=SND.WL2. 25221da177e4SLinus Torvalds */ 25231da177e4SLinus Torvalds tcp_update_wl(tp, ack, ack_seq); 25241da177e4SLinus Torvalds tp->snd_una = ack; 25251da177e4SLinus Torvalds flag |= FLAG_WIN_UPDATE; 25261da177e4SLinus Torvalds 25276687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_FAST_ACK); 2528317a76f9SStephen Hemminger 25291da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS); 25301da177e4SLinus Torvalds } else { 25311da177e4SLinus Torvalds if (ack_seq != TCP_SKB_CB(skb)->end_seq) 25321da177e4SLinus Torvalds flag |= FLAG_DATA; 25331da177e4SLinus Torvalds else 25341da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS); 25351da177e4SLinus Torvalds 25361da177e4SLinus Torvalds flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq); 25371da177e4SLinus Torvalds 25381da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked) 25391da177e4SLinus Torvalds flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 25401da177e4SLinus Torvalds 25411da177e4SLinus Torvalds if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th)) 25421da177e4SLinus Torvalds flag |= FLAG_ECE; 25431da177e4SLinus Torvalds 25446687e988SArnaldo Carvalho de Melo tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 25451da177e4SLinus Torvalds } 25461da177e4SLinus Torvalds 25471da177e4SLinus Torvalds /* We passed data and got it acked, remove any soft error 25481da177e4SLinus Torvalds * log. Something worked... 25491da177e4SLinus Torvalds */ 25501da177e4SLinus Torvalds sk->sk_err_soft = 0; 25511da177e4SLinus Torvalds tp->rcv_tstamp = tcp_time_stamp; 25521da177e4SLinus Torvalds prior_packets = tp->packets_out; 25531da177e4SLinus Torvalds if (!prior_packets) 25541da177e4SLinus Torvalds goto no_queue; 25551da177e4SLinus Torvalds 25561da177e4SLinus Torvalds prior_in_flight = tcp_packets_in_flight(tp); 25571da177e4SLinus Torvalds 25581da177e4SLinus Torvalds /* See if we can take anything off of the retransmit queue. */ 25592d2abbabSStephen Hemminger flag |= tcp_clean_rtx_queue(sk, &seq_rtt); 25601da177e4SLinus Torvalds 25611da177e4SLinus Torvalds if (tp->frto_counter) 25621da177e4SLinus Torvalds tcp_process_frto(sk, prior_snd_una); 25631da177e4SLinus Torvalds 25646687e988SArnaldo Carvalho de Melo if (tcp_ack_is_dubious(sk, flag)) { 2565caa20d9aSStephen Hemminger /* Advance CWND, if state allows this. 
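 * tcp_may_raise_cwnd() refuses while in CWR or Recovery, or when an ECE arrived with snd_cwnd already at or above snd_ssthresh.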
*/ 25666687e988SArnaldo Carvalho de Melo if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) 25676687e988SArnaldo Carvalho de Melo tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0); 25681da177e4SLinus Torvalds tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag); 25691da177e4SLinus Torvalds } else { 2570317a76f9SStephen Hemminger if ((flag & FLAG_DATA_ACKED)) 25716687e988SArnaldo Carvalho de Melo tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1); 25721da177e4SLinus Torvalds } 25731da177e4SLinus Torvalds 25741da177e4SLinus Torvalds if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP)) 25751da177e4SLinus Torvalds dst_confirm(sk->sk_dst_cache); 25761da177e4SLinus Torvalds 25771da177e4SLinus Torvalds return 1; 25781da177e4SLinus Torvalds 25791da177e4SLinus Torvalds no_queue: 25806687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 25811da177e4SLinus Torvalds 25821da177e4SLinus Torvalds /* If this ack opens up a zero window, clear backoff. It was 25831da177e4SLinus Torvalds * being used to time the probes, and is probably far higher than 25841da177e4SLinus Torvalds * it needs to be for normal retransmission. 25851da177e4SLinus Torvalds */ 25861da177e4SLinus Torvalds if (sk->sk_send_head) 25871da177e4SLinus Torvalds tcp_ack_probe(sk); 25881da177e4SLinus Torvalds return 1; 25891da177e4SLinus Torvalds 25901da177e4SLinus Torvalds old_ack: 25911da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->sacked) 25921da177e4SLinus Torvalds tcp_sacktag_write_queue(sk, skb, prior_snd_una); 25931da177e4SLinus Torvalds 25941da177e4SLinus Torvalds uninteresting_ack: 25951da177e4SLinus Torvalds SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 25961da177e4SLinus Torvalds return 0; 25971da177e4SLinus Torvalds } 25981da177e4SLinus Torvalds 25991da177e4SLinus Torvalds 26001da177e4SLinus Torvalds /* Look for tcp options. Normally only called on SYN and SYNACK packets. 26011da177e4SLinus Torvalds * But, this can also be called on packets in the established flow when 26021da177e4SLinus Torvalds * the fast version below fails. 
26031da177e4SLinus Torvalds */ 26041da177e4SLinus Torvalds void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab) 26051da177e4SLinus Torvalds { 26061da177e4SLinus Torvalds unsigned char *ptr; 26071da177e4SLinus Torvalds struct tcphdr *th = skb->h.th; 26081da177e4SLinus Torvalds int length=(th->doff*4)-sizeof(struct tcphdr); 26091da177e4SLinus Torvalds 26101da177e4SLinus Torvalds ptr = (unsigned char *)(th + 1); 26111da177e4SLinus Torvalds opt_rx->saw_tstamp = 0; 26121da177e4SLinus Torvalds 26131da177e4SLinus Torvalds while(length>0) { 26141da177e4SLinus Torvalds int opcode=*ptr++; 26151da177e4SLinus Torvalds int opsize; 26161da177e4SLinus Torvalds 26171da177e4SLinus Torvalds switch (opcode) { 26181da177e4SLinus Torvalds case TCPOPT_EOL: 26191da177e4SLinus Torvalds return; 26201da177e4SLinus Torvalds case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 26211da177e4SLinus Torvalds length--; 26221da177e4SLinus Torvalds continue; 26231da177e4SLinus Torvalds default: 26241da177e4SLinus Torvalds opsize=*ptr++; 26251da177e4SLinus Torvalds if (opsize < 2) /* "silly options" */ 26261da177e4SLinus Torvalds return; 26271da177e4SLinus Torvalds if (opsize > length) 26281da177e4SLinus Torvalds return; /* don't parse partial options */ 26291da177e4SLinus Torvalds switch(opcode) { 26301da177e4SLinus Torvalds case TCPOPT_MSS: 26311da177e4SLinus Torvalds if(opsize==TCPOLEN_MSS && th->syn && !estab) { 26324f3608b7SAl Viro u16 in_mss = ntohs(get_unaligned((__be16 *)ptr)); 26331da177e4SLinus Torvalds if (in_mss) { 26341da177e4SLinus Torvalds if (opt_rx->user_mss && opt_rx->user_mss < in_mss) 26351da177e4SLinus Torvalds in_mss = opt_rx->user_mss; 26361da177e4SLinus Torvalds opt_rx->mss_clamp = in_mss; 26371da177e4SLinus Torvalds } 26381da177e4SLinus Torvalds } 26391da177e4SLinus Torvalds break; 26401da177e4SLinus Torvalds case TCPOPT_WINDOW: 26411da177e4SLinus Torvalds if(opsize==TCPOLEN_WINDOW && th->syn && !estab) 26421da177e4SLinus Torvalds if (sysctl_tcp_window_scaling) { 26431da177e4SLinus Torvalds __u8 snd_wscale = *(__u8 *) ptr; 26441da177e4SLinus Torvalds opt_rx->wscale_ok = 1; 26451da177e4SLinus Torvalds if (snd_wscale > 14) { 26461da177e4SLinus Torvalds if(net_ratelimit()) 26471da177e4SLinus Torvalds printk(KERN_INFO "tcp_parse_options: Illegal window " 26481da177e4SLinus Torvalds "scaling value %d >14 received.\n", 26491da177e4SLinus Torvalds snd_wscale); 26501da177e4SLinus Torvalds snd_wscale = 14; 26511da177e4SLinus Torvalds } 26521da177e4SLinus Torvalds opt_rx->snd_wscale = snd_wscale; 26531da177e4SLinus Torvalds } 26541da177e4SLinus Torvalds break; 26551da177e4SLinus Torvalds case TCPOPT_TIMESTAMP: 26561da177e4SLinus Torvalds if(opsize==TCPOLEN_TIMESTAMP) { 26571da177e4SLinus Torvalds if ((estab && opt_rx->tstamp_ok) || 26581da177e4SLinus Torvalds (!estab && sysctl_tcp_timestamps)) { 26591da177e4SLinus Torvalds opt_rx->saw_tstamp = 1; 26604f3608b7SAl Viro opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr)); 26614f3608b7SAl Viro opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4))); 26621da177e4SLinus Torvalds } 26631da177e4SLinus Torvalds } 26641da177e4SLinus Torvalds break; 26651da177e4SLinus Torvalds case TCPOPT_SACK_PERM: 26661da177e4SLinus Torvalds if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) { 26671da177e4SLinus Torvalds if (sysctl_tcp_sack) { 26681da177e4SLinus Torvalds opt_rx->sack_ok = 1; 26691da177e4SLinus Torvalds tcp_sack_reset(opt_rx); 26701da177e4SLinus Torvalds } 26711da177e4SLinus Torvalds } 26721da177e4SLinus Torvalds break; 
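/* SACK blocks are not copied here. Only the option's byte offset within the TCP header is recorded in TCP_SKB_CB(skb)->sacked; tcp_sacktag_write_queue() walks the individual blocks later while the ACK is processed. */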
26731da177e4SLinus Torvalds 26741da177e4SLinus Torvalds case TCPOPT_SACK: 26751da177e4SLinus Torvalds if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 26761da177e4SLinus Torvalds !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 26771da177e4SLinus Torvalds opt_rx->sack_ok) { 26781da177e4SLinus Torvalds TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 26791da177e4SLinus Torvalds } 2680cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 2681cfb6eeb4SYOSHIFUJI Hideaki case TCPOPT_MD5SIG: 2682cfb6eeb4SYOSHIFUJI Hideaki /* 2683cfb6eeb4SYOSHIFUJI Hideaki * The MD5 Hash has already been 2684cfb6eeb4SYOSHIFUJI Hideaki * checked (see tcp_v{4,6}_do_rcv()). 2685cfb6eeb4SYOSHIFUJI Hideaki */ 2686cfb6eeb4SYOSHIFUJI Hideaki break; 2687cfb6eeb4SYOSHIFUJI Hideaki #endif 26881da177e4SLinus Torvalds }; 26891da177e4SLinus Torvalds ptr+=opsize-2; 26901da177e4SLinus Torvalds length-=opsize; 26911da177e4SLinus Torvalds }; 26921da177e4SLinus Torvalds } 26931da177e4SLinus Torvalds } 26941da177e4SLinus Torvalds 26951da177e4SLinus Torvalds /* Fast parse options. This hopes to only see timestamps. 26961da177e4SLinus Torvalds * If it is wrong it falls back on tcp_parse_options(). 26971da177e4SLinus Torvalds */ 269840efc6faSStephen Hemminger static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 26991da177e4SLinus Torvalds struct tcp_sock *tp) 27001da177e4SLinus Torvalds { 27011da177e4SLinus Torvalds if (th->doff == sizeof(struct tcphdr)>>2) { 27021da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 27031da177e4SLinus Torvalds return 0; 27041da177e4SLinus Torvalds } else if (tp->rx_opt.tstamp_ok && 27051da177e4SLinus Torvalds th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 27064f3608b7SAl Viro __be32 *ptr = (__be32 *)(th + 1); 27074f3608b7SAl Viro if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 27081da177e4SLinus Torvalds | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 27091da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 1; 27101da177e4SLinus Torvalds ++ptr; 27111da177e4SLinus Torvalds tp->rx_opt.rcv_tsval = ntohl(*ptr); 27121da177e4SLinus Torvalds ++ptr; 27131da177e4SLinus Torvalds tp->rx_opt.rcv_tsecr = ntohl(*ptr); 27141da177e4SLinus Torvalds return 1; 27151da177e4SLinus Torvalds } 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds tcp_parse_options(skb, &tp->rx_opt, 1); 27181da177e4SLinus Torvalds return 1; 27191da177e4SLinus Torvalds } 27201da177e4SLinus Torvalds 27211da177e4SLinus Torvalds static inline void tcp_store_ts_recent(struct tcp_sock *tp) 27221da177e4SLinus Torvalds { 27231da177e4SLinus Torvalds tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 27241da177e4SLinus Torvalds tp->rx_opt.ts_recent_stamp = xtime.tv_sec; 27251da177e4SLinus Torvalds } 27261da177e4SLinus Torvalds 27271da177e4SLinus Torvalds static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 27281da177e4SLinus Torvalds { 27291da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 27301da177e4SLinus Torvalds /* PAWS bug workaround wrt. ACK frames, the PAWS discard 27311da177e4SLinus Torvalds * extra check below makes sure this can only happen 27321da177e4SLinus Torvalds * for pure ACK frames. -DaveM 27331da177e4SLinus Torvalds * 27341da177e4SLinus Torvalds * Not only, also it occurs for expired timestamps. 
27351da177e4SLinus Torvalds */ 27361da177e4SLinus Torvalds 27371da177e4SLinus Torvalds if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 || 27381da177e4SLinus Torvalds xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS) 27391da177e4SLinus Torvalds tcp_store_ts_recent(tp); 27401da177e4SLinus Torvalds } 27411da177e4SLinus Torvalds } 27421da177e4SLinus Torvalds 27431da177e4SLinus Torvalds /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 27441da177e4SLinus Torvalds * 27451da177e4SLinus Torvalds * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 27461da177e4SLinus Torvalds * it can pass through stack. So, the following predicate verifies that 27471da177e4SLinus Torvalds * this segment is not used for anything but congestion avoidance or 27481da177e4SLinus Torvalds * fast retransmit. Moreover, we even are able to eliminate most of such 27491da177e4SLinus Torvalds * second order effects, if we apply some small "replay" window (~RTO) 27501da177e4SLinus Torvalds * to timestamp space. 27511da177e4SLinus Torvalds * 27521da177e4SLinus Torvalds * All these measures still do not guarantee that we reject wrapped ACKs 27531da177e4SLinus Torvalds * on networks with high bandwidth, when sequence space is recycled fastly, 27541da177e4SLinus Torvalds * but it guarantees that such events will be very rare and do not affect 27551da177e4SLinus Torvalds * connection seriously. This doesn't look nice, but alas, PAWS is really 27561da177e4SLinus Torvalds * buggy extension. 27571da177e4SLinus Torvalds * 27581da177e4SLinus Torvalds * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 27591da177e4SLinus Torvalds * states that events when retransmit arrives after original data are rare. 27601da177e4SLinus Torvalds * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 27611da177e4SLinus Torvalds * the biggest problem on large power networks even with minor reordering. 27621da177e4SLinus Torvalds * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 27631da177e4SLinus Torvalds * up to bandwidth of 18Gigabit/sec. 8) ] 27641da177e4SLinus Torvalds */ 27651da177e4SLinus Torvalds 2766463c84b9SArnaldo Carvalho de Melo static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 27671da177e4SLinus Torvalds { 2768463c84b9SArnaldo Carvalho de Melo struct tcp_sock *tp = tcp_sk(sk); 27691da177e4SLinus Torvalds struct tcphdr *th = skb->h.th; 27701da177e4SLinus Torvalds u32 seq = TCP_SKB_CB(skb)->seq; 27711da177e4SLinus Torvalds u32 ack = TCP_SKB_CB(skb)->ack_seq; 27721da177e4SLinus Torvalds 27731da177e4SLinus Torvalds return (/* 1. Pure ACK with correct sequence number. */ 27741da177e4SLinus Torvalds (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 27751da177e4SLinus Torvalds 27761da177e4SLinus Torvalds /* 2. ... and duplicate ACK. */ 27771da177e4SLinus Torvalds ack == tp->snd_una && 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds /* 3. ... and does not update window. */ 27801da177e4SLinus Torvalds !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 27811da177e4SLinus Torvalds 27821da177e4SLinus Torvalds /* 4. ... and sits in replay window. 
*/ 2783463c84b9SArnaldo Carvalho de Melo (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 27841da177e4SLinus Torvalds } 27851da177e4SLinus Torvalds 2786463c84b9SArnaldo Carvalho de Melo static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) 27871da177e4SLinus Torvalds { 2788463c84b9SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 27891da177e4SLinus Torvalds return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 27901da177e4SLinus Torvalds xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 2791463c84b9SArnaldo Carvalho de Melo !tcp_disordered_ack(sk, skb)); 27921da177e4SLinus Torvalds } 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds /* Check segment sequence number for validity. 27951da177e4SLinus Torvalds * 27961da177e4SLinus Torvalds * Segment controls are considered valid, if the segment 27971da177e4SLinus Torvalds * fits to the window after truncation to the window. Acceptability 27981da177e4SLinus Torvalds * of data (and SYN, FIN, of course) is checked separately. 27991da177e4SLinus Torvalds * See tcp_data_queue(), for example. 28001da177e4SLinus Torvalds * 28011da177e4SLinus Torvalds * Also, controls (RST is main one) are accepted using RCV.WUP instead 28021da177e4SLinus Torvalds * of RCV.NXT. Peer still did not advance his SND.UNA when we 28031da177e4SLinus Torvalds * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 28041da177e4SLinus Torvalds * (borrowed from freebsd) 28051da177e4SLinus Torvalds */ 28061da177e4SLinus Torvalds 28071da177e4SLinus Torvalds static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) 28081da177e4SLinus Torvalds { 28091da177e4SLinus Torvalds return !before(end_seq, tp->rcv_wup) && 28101da177e4SLinus Torvalds !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 28111da177e4SLinus Torvalds } 28121da177e4SLinus Torvalds 28131da177e4SLinus Torvalds /* When we get a reset we do this. */ 28141da177e4SLinus Torvalds static void tcp_reset(struct sock *sk) 28151da177e4SLinus Torvalds { 28161da177e4SLinus Torvalds /* We want the right error as BSD sees it (and indeed as we do). */ 28171da177e4SLinus Torvalds switch (sk->sk_state) { 28181da177e4SLinus Torvalds case TCP_SYN_SENT: 28191da177e4SLinus Torvalds sk->sk_err = ECONNREFUSED; 28201da177e4SLinus Torvalds break; 28211da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 28221da177e4SLinus Torvalds sk->sk_err = EPIPE; 28231da177e4SLinus Torvalds break; 28241da177e4SLinus Torvalds case TCP_CLOSE: 28251da177e4SLinus Torvalds return; 28261da177e4SLinus Torvalds default: 28271da177e4SLinus Torvalds sk->sk_err = ECONNRESET; 28281da177e4SLinus Torvalds } 28291da177e4SLinus Torvalds 28301da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 28311da177e4SLinus Torvalds sk->sk_error_report(sk); 28321da177e4SLinus Torvalds 28331da177e4SLinus Torvalds tcp_done(sk); 28341da177e4SLinus Torvalds } 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds /* 28371da177e4SLinus Torvalds * Process the FIN bit. This now behaves as it is supposed to work 28381da177e4SLinus Torvalds * and the FIN takes effect when it is validly part of sequence 28391da177e4SLinus Torvalds * space. Not before when we get holes. 
28401da177e4SLinus Torvalds * 28411da177e4SLinus Torvalds * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 28421da177e4SLinus Torvalds * (and thence onto LAST-ACK and finally, CLOSE, we never enter 28431da177e4SLinus Torvalds * TIME-WAIT) 28441da177e4SLinus Torvalds * 28451da177e4SLinus Torvalds * If we are in FINWAIT-1, a received FIN indicates simultaneous 28461da177e4SLinus Torvalds * close and we go into CLOSING (and later onto TIME-WAIT) 28471da177e4SLinus Torvalds * 28481da177e4SLinus Torvalds * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 28491da177e4SLinus Torvalds */ 28501da177e4SLinus Torvalds static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) 28511da177e4SLinus Torvalds { 28521da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 28531da177e4SLinus Torvalds 2854463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 28551da177e4SLinus Torvalds 28561da177e4SLinus Torvalds sk->sk_shutdown |= RCV_SHUTDOWN; 28571da177e4SLinus Torvalds sock_set_flag(sk, SOCK_DONE); 28581da177e4SLinus Torvalds 28591da177e4SLinus Torvalds switch (sk->sk_state) { 28601da177e4SLinus Torvalds case TCP_SYN_RECV: 28611da177e4SLinus Torvalds case TCP_ESTABLISHED: 28621da177e4SLinus Torvalds /* Move to CLOSE_WAIT */ 28631da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE_WAIT); 2864463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.pingpong = 1; 28651da177e4SLinus Torvalds break; 28661da177e4SLinus Torvalds 28671da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 28681da177e4SLinus Torvalds case TCP_CLOSING: 28691da177e4SLinus Torvalds /* Received a retransmission of the FIN, do 28701da177e4SLinus Torvalds * nothing. 28711da177e4SLinus Torvalds */ 28721da177e4SLinus Torvalds break; 28731da177e4SLinus Torvalds case TCP_LAST_ACK: 28741da177e4SLinus Torvalds /* RFC793: Remain in the LAST-ACK state. */ 28751da177e4SLinus Torvalds break; 28761da177e4SLinus Torvalds 28771da177e4SLinus Torvalds case TCP_FIN_WAIT1: 28781da177e4SLinus Torvalds /* This case occurs when a simultaneous close 28791da177e4SLinus Torvalds * happens, we must ack the received FIN and 28801da177e4SLinus Torvalds * enter the CLOSING state. 28811da177e4SLinus Torvalds */ 28821da177e4SLinus Torvalds tcp_send_ack(sk); 28831da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSING); 28841da177e4SLinus Torvalds break; 28851da177e4SLinus Torvalds case TCP_FIN_WAIT2: 28861da177e4SLinus Torvalds /* Received a FIN -- send ACK and enter TIME_WAIT. */ 28871da177e4SLinus Torvalds tcp_send_ack(sk); 28881da177e4SLinus Torvalds tcp_time_wait(sk, TCP_TIME_WAIT, 0); 28891da177e4SLinus Torvalds break; 28901da177e4SLinus Torvalds default: 28911da177e4SLinus Torvalds /* Only TCP_LISTEN and TCP_CLOSE are left, in these 28921da177e4SLinus Torvalds * cases we should never reach this piece of code. 28931da177e4SLinus Torvalds */ 28941da177e4SLinus Torvalds printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 28951da177e4SLinus Torvalds __FUNCTION__, sk->sk_state); 28961da177e4SLinus Torvalds break; 28971da177e4SLinus Torvalds }; 28981da177e4SLinus Torvalds 28991da177e4SLinus Torvalds /* It _is_ possible, that we have something out-of-order _after_ FIN. 29001da177e4SLinus Torvalds * Probably, we should reset in this case. For now drop them. 
29011da177e4SLinus Torvalds */ 29021da177e4SLinus Torvalds __skb_queue_purge(&tp->out_of_order_queue); 29031da177e4SLinus Torvalds if (tp->rx_opt.sack_ok) 29041da177e4SLinus Torvalds tcp_sack_reset(&tp->rx_opt); 29051da177e4SLinus Torvalds sk_stream_mem_reclaim(sk); 29061da177e4SLinus Torvalds 29071da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) { 29081da177e4SLinus Torvalds sk->sk_state_change(sk); 29091da177e4SLinus Torvalds 29101da177e4SLinus Torvalds /* Do not send POLL_HUP for half duplex close. */ 29111da177e4SLinus Torvalds if (sk->sk_shutdown == SHUTDOWN_MASK || 29121da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE) 29131da177e4SLinus Torvalds sk_wake_async(sk, 1, POLL_HUP); 29141da177e4SLinus Torvalds else 29151da177e4SLinus Torvalds sk_wake_async(sk, 1, POLL_IN); 29161da177e4SLinus Torvalds } 29171da177e4SLinus Torvalds } 29181da177e4SLinus Torvalds 291940efc6faSStephen Hemminger static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) 29201da177e4SLinus Torvalds { 29211da177e4SLinus Torvalds if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 29221da177e4SLinus Torvalds if (before(seq, sp->start_seq)) 29231da177e4SLinus Torvalds sp->start_seq = seq; 29241da177e4SLinus Torvalds if (after(end_seq, sp->end_seq)) 29251da177e4SLinus Torvalds sp->end_seq = end_seq; 29261da177e4SLinus Torvalds return 1; 29271da177e4SLinus Torvalds } 29281da177e4SLinus Torvalds return 0; 29291da177e4SLinus Torvalds } 29301da177e4SLinus Torvalds 293140efc6faSStephen Hemminger static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) 29321da177e4SLinus Torvalds { 29331da177e4SLinus Torvalds if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { 29341da177e4SLinus Torvalds if (before(seq, tp->rcv_nxt)) 29351da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); 29361da177e4SLinus Torvalds else 29371da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT); 29381da177e4SLinus Torvalds 29391da177e4SLinus Torvalds tp->rx_opt.dsack = 1; 29401da177e4SLinus Torvalds tp->duplicate_sack[0].start_seq = seq; 29411da177e4SLinus Torvalds tp->duplicate_sack[0].end_seq = end_seq; 29421da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + 1, 4 - tp->rx_opt.tstamp_ok); 29431da177e4SLinus Torvalds } 29441da177e4SLinus Torvalds } 29451da177e4SLinus Torvalds 294640efc6faSStephen Hemminger static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) 29471da177e4SLinus Torvalds { 29481da177e4SLinus Torvalds if (!tp->rx_opt.dsack) 29491da177e4SLinus Torvalds tcp_dsack_set(tp, seq, end_seq); 29501da177e4SLinus Torvalds else 29511da177e4SLinus Torvalds tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 29521da177e4SLinus Torvalds } 29531da177e4SLinus Torvalds 29541da177e4SLinus Torvalds static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 29551da177e4SLinus Torvalds { 29561da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29571da177e4SLinus Torvalds 29581da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 29591da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 29601da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 2961463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 29621da177e4SLinus Torvalds 29631da177e4SLinus Torvalds if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { 29641da177e4SLinus Torvalds u32 end_seq = TCP_SKB_CB(skb)->end_seq; 29651da177e4SLinus Torvalds 29661da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 
29671da177e4SLinus Torvalds end_seq = tp->rcv_nxt; 29681da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq); 29691da177e4SLinus Torvalds } 29701da177e4SLinus Torvalds } 29711da177e4SLinus Torvalds 29721da177e4SLinus Torvalds tcp_send_ack(sk); 29731da177e4SLinus Torvalds } 29741da177e4SLinus Torvalds 29751da177e4SLinus Torvalds /* These routines update the SACK block as out-of-order packets arrive or 29761da177e4SLinus Torvalds * in-order packets close up the sequence space. 29771da177e4SLinus Torvalds */ 29781da177e4SLinus Torvalds static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 29791da177e4SLinus Torvalds { 29801da177e4SLinus Torvalds int this_sack; 29811da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 29821da177e4SLinus Torvalds struct tcp_sack_block *swalk = sp+1; 29831da177e4SLinus Torvalds 29841da177e4SLinus Torvalds /* See if the recent change to the first SACK eats into 29851da177e4SLinus Torvalds * or hits the sequence space of other SACK blocks, if so coalesce. 29861da177e4SLinus Torvalds */ 29871da177e4SLinus Torvalds for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; ) { 29881da177e4SLinus Torvalds if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 29891da177e4SLinus Torvalds int i; 29901da177e4SLinus Torvalds 29911da177e4SLinus Torvalds /* Zap SWALK, by moving every further SACK up by one slot. 29921da177e4SLinus Torvalds * Decrease num_sacks. 29931da177e4SLinus Torvalds */ 29941da177e4SLinus Torvalds tp->rx_opt.num_sacks--; 29951da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok); 29961da177e4SLinus Torvalds for(i=this_sack; i < tp->rx_opt.num_sacks; i++) 29971da177e4SLinus Torvalds sp[i] = sp[i+1]; 29981da177e4SLinus Torvalds continue; 29991da177e4SLinus Torvalds } 30001da177e4SLinus Torvalds this_sack++, swalk++; 30011da177e4SLinus Torvalds } 30021da177e4SLinus Torvalds } 30031da177e4SLinus Torvalds 300440efc6faSStephen Hemminger static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2) 30051da177e4SLinus Torvalds { 30061da177e4SLinus Torvalds __u32 tmp; 30071da177e4SLinus Torvalds 30081da177e4SLinus Torvalds tmp = sack1->start_seq; 30091da177e4SLinus Torvalds sack1->start_seq = sack2->start_seq; 30101da177e4SLinus Torvalds sack2->start_seq = tmp; 30111da177e4SLinus Torvalds 30121da177e4SLinus Torvalds tmp = sack1->end_seq; 30131da177e4SLinus Torvalds sack1->end_seq = sack2->end_seq; 30141da177e4SLinus Torvalds sack2->end_seq = tmp; 30151da177e4SLinus Torvalds } 30161da177e4SLinus Torvalds 30171da177e4SLinus Torvalds static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 30181da177e4SLinus Torvalds { 30191da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30201da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 30211da177e4SLinus Torvalds int cur_sacks = tp->rx_opt.num_sacks; 30221da177e4SLinus Torvalds int this_sack; 30231da177e4SLinus Torvalds 30241da177e4SLinus Torvalds if (!cur_sacks) 30251da177e4SLinus Torvalds goto new_sack; 30261da177e4SLinus Torvalds 30271da177e4SLinus Torvalds for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) { 30281da177e4SLinus Torvalds if (tcp_sack_extend(sp, seq, end_seq)) { 30291da177e4SLinus Torvalds /* Rotate this_sack to the first one. 
*/ 30301da177e4SLinus Torvalds for (; this_sack>0; this_sack--, sp--) 30311da177e4SLinus Torvalds tcp_sack_swap(sp, sp-1); 30321da177e4SLinus Torvalds if (cur_sacks > 1) 30331da177e4SLinus Torvalds tcp_sack_maybe_coalesce(tp); 30341da177e4SLinus Torvalds return; 30351da177e4SLinus Torvalds } 30361da177e4SLinus Torvalds } 30371da177e4SLinus Torvalds 30381da177e4SLinus Torvalds /* Could not find an adjacent existing SACK, build a new one, 30391da177e4SLinus Torvalds * put it at the front, and shift everyone else down. We 30401da177e4SLinus Torvalds * always know there is at least one SACK present already here. 30411da177e4SLinus Torvalds * 30421da177e4SLinus Torvalds * If the sack array is full, forget about the last one. 30431da177e4SLinus Torvalds */ 30441da177e4SLinus Torvalds if (this_sack >= 4) { 30451da177e4SLinus Torvalds this_sack--; 30461da177e4SLinus Torvalds tp->rx_opt.num_sacks--; 30471da177e4SLinus Torvalds sp--; 30481da177e4SLinus Torvalds } 30491da177e4SLinus Torvalds for(; this_sack > 0; this_sack--, sp--) 30501da177e4SLinus Torvalds *sp = *(sp-1); 30511da177e4SLinus Torvalds 30521da177e4SLinus Torvalds new_sack: 30531da177e4SLinus Torvalds /* Build the new head SACK, and we're done. */ 30541da177e4SLinus Torvalds sp->start_seq = seq; 30551da177e4SLinus Torvalds sp->end_seq = end_seq; 30561da177e4SLinus Torvalds tp->rx_opt.num_sacks++; 30571da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok); 30581da177e4SLinus Torvalds } 30591da177e4SLinus Torvalds 30601da177e4SLinus Torvalds /* RCV.NXT advances, some SACKs should be eaten. */ 30611da177e4SLinus Torvalds 30621da177e4SLinus Torvalds static void tcp_sack_remove(struct tcp_sock *tp) 30631da177e4SLinus Torvalds { 30641da177e4SLinus Torvalds struct tcp_sack_block *sp = &tp->selective_acks[0]; 30651da177e4SLinus Torvalds int num_sacks = tp->rx_opt.num_sacks; 30661da177e4SLinus Torvalds int this_sack; 30671da177e4SLinus Torvalds 30681da177e4SLinus Torvalds /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 3069b03efcfbSDavid S. Miller if (skb_queue_empty(&tp->out_of_order_queue)) { 30701da177e4SLinus Torvalds tp->rx_opt.num_sacks = 0; 30711da177e4SLinus Torvalds tp->rx_opt.eff_sacks = tp->rx_opt.dsack; 30721da177e4SLinus Torvalds return; 30731da177e4SLinus Torvalds } 30741da177e4SLinus Torvalds 30751da177e4SLinus Torvalds for(this_sack = 0; this_sack < num_sacks; ) { 30761da177e4SLinus Torvalds /* Check if the start of the sack is covered by RCV.NXT. */ 30771da177e4SLinus Torvalds if (!before(tp->rcv_nxt, sp->start_seq)) { 30781da177e4SLinus Torvalds int i; 30791da177e4SLinus Torvalds 30801da177e4SLinus Torvalds /* RCV.NXT must cover all the block! */ 30811da177e4SLinus Torvalds BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq)); 30821da177e4SLinus Torvalds 30831da177e4SLinus Torvalds /* Zap this SACK, by moving forward any other SACKS. 
*/ 30841da177e4SLinus Torvalds for (i=this_sack+1; i < num_sacks; i++) 30851da177e4SLinus Torvalds tp->selective_acks[i-1] = tp->selective_acks[i]; 30861da177e4SLinus Torvalds num_sacks--; 30871da177e4SLinus Torvalds continue; 30881da177e4SLinus Torvalds } 30891da177e4SLinus Torvalds this_sack++; 30901da177e4SLinus Torvalds sp++; 30911da177e4SLinus Torvalds } 30921da177e4SLinus Torvalds if (num_sacks != tp->rx_opt.num_sacks) { 30931da177e4SLinus Torvalds tp->rx_opt.num_sacks = num_sacks; 30941da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok); 30951da177e4SLinus Torvalds } 30961da177e4SLinus Torvalds } 30971da177e4SLinus Torvalds 30981da177e4SLinus Torvalds /* This one checks to see if we can put data from the 30991da177e4SLinus Torvalds * out_of_order queue into the receive_queue. 31001da177e4SLinus Torvalds */ 31011da177e4SLinus Torvalds static void tcp_ofo_queue(struct sock *sk) 31021da177e4SLinus Torvalds { 31031da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 31041da177e4SLinus Torvalds __u32 dsack_high = tp->rcv_nxt; 31051da177e4SLinus Torvalds struct sk_buff *skb; 31061da177e4SLinus Torvalds 31071da177e4SLinus Torvalds while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 31081da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 31091da177e4SLinus Torvalds break; 31101da177e4SLinus Torvalds 31111da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 31121da177e4SLinus Torvalds __u32 dsack = dsack_high; 31131da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 31141da177e4SLinus Torvalds dsack_high = TCP_SKB_CB(skb)->end_seq; 31151da177e4SLinus Torvalds tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack); 31161da177e4SLinus Torvalds } 31171da177e4SLinus Torvalds 31181da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 31191da177e4SLinus Torvalds SOCK_DEBUG(sk, "ofo packet was already received \n"); 31208728b834SDavid S. Miller __skb_unlink(skb, &tp->out_of_order_queue); 31211da177e4SLinus Torvalds __kfree_skb(skb); 31221da177e4SLinus Torvalds continue; 31231da177e4SLinus Torvalds } 31241da177e4SLinus Torvalds SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 31251da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 31261da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq); 31271da177e4SLinus Torvalds 31288728b834SDavid S. 
Miller __skb_unlink(skb, &tp->out_of_order_queue); 31291da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 31301da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 31311da177e4SLinus Torvalds if(skb->h.th->fin) 31321da177e4SLinus Torvalds tcp_fin(skb, sk, skb->h.th); 31331da177e4SLinus Torvalds } 31341da177e4SLinus Torvalds } 31351da177e4SLinus Torvalds 31361da177e4SLinus Torvalds static int tcp_prune_queue(struct sock *sk); 31371da177e4SLinus Torvalds 31381da177e4SLinus Torvalds static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 31391da177e4SLinus Torvalds { 31401da177e4SLinus Torvalds struct tcphdr *th = skb->h.th; 31411da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 31421da177e4SLinus Torvalds int eaten = -1; 31431da177e4SLinus Torvalds 31441da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 31451da177e4SLinus Torvalds goto drop; 31461da177e4SLinus Torvalds 31471da177e4SLinus Torvalds __skb_pull(skb, th->doff*4); 31481da177e4SLinus Torvalds 31491da177e4SLinus Torvalds TCP_ECN_accept_cwr(tp, skb); 31501da177e4SLinus Torvalds 31511da177e4SLinus Torvalds if (tp->rx_opt.dsack) { 31521da177e4SLinus Torvalds tp->rx_opt.dsack = 0; 31531da177e4SLinus Torvalds tp->rx_opt.eff_sacks = min_t(unsigned int, tp->rx_opt.num_sacks, 31541da177e4SLinus Torvalds 4 - tp->rx_opt.tstamp_ok); 31551da177e4SLinus Torvalds } 31561da177e4SLinus Torvalds 31571da177e4SLinus Torvalds /* Queue data for delivery to the user. 31581da177e4SLinus Torvalds * Packets in sequence go to the receive queue. 31591da177e4SLinus Torvalds * Out of sequence packets to the out_of_order_queue. 31601da177e4SLinus Torvalds */ 31611da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 31621da177e4SLinus Torvalds if (tcp_receive_window(tp) == 0) 31631da177e4SLinus Torvalds goto out_of_window; 31641da177e4SLinus Torvalds 31651da177e4SLinus Torvalds /* Ok. In sequence. In window. 
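 * If a task is waiting for exactly this data in tcp_recvmsg(), copy the
 * payload straight into its iovec instead of queueing the skb.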
*/ 31661da177e4SLinus Torvalds if (tp->ucopy.task == current && 31671da177e4SLinus Torvalds tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 31681da177e4SLinus Torvalds sock_owned_by_user(sk) && !tp->urg_data) { 31691da177e4SLinus Torvalds int chunk = min_t(unsigned int, skb->len, 31701da177e4SLinus Torvalds tp->ucopy.len); 31711da177e4SLinus Torvalds 31721da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 31731da177e4SLinus Torvalds 31741da177e4SLinus Torvalds local_bh_enable(); 31751da177e4SLinus Torvalds if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 31761da177e4SLinus Torvalds tp->ucopy.len -= chunk; 31771da177e4SLinus Torvalds tp->copied_seq += chunk; 31781da177e4SLinus Torvalds eaten = (chunk == skb->len && !th->fin); 31791da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 31801da177e4SLinus Torvalds } 31811da177e4SLinus Torvalds local_bh_disable(); 31821da177e4SLinus Torvalds } 31831da177e4SLinus Torvalds 31841da177e4SLinus Torvalds if (eaten <= 0) { 31851da177e4SLinus Torvalds queue_and_out: 31861da177e4SLinus Torvalds if (eaten < 0 && 31871da177e4SLinus Torvalds (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 31881da177e4SLinus Torvalds !sk_stream_rmem_schedule(sk, skb))) { 31891da177e4SLinus Torvalds if (tcp_prune_queue(sk) < 0 || 31901da177e4SLinus Torvalds !sk_stream_rmem_schedule(sk, skb)) 31911da177e4SLinus Torvalds goto drop; 31921da177e4SLinus Torvalds } 31931da177e4SLinus Torvalds sk_stream_set_owner_r(skb, sk); 31941da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 31951da177e4SLinus Torvalds } 31961da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 31971da177e4SLinus Torvalds if(skb->len) 31981da177e4SLinus Torvalds tcp_event_data_recv(sk, tp, skb); 31991da177e4SLinus Torvalds if(th->fin) 32001da177e4SLinus Torvalds tcp_fin(skb, sk, th); 32011da177e4SLinus Torvalds 3202b03efcfbSDavid S. Miller if (!skb_queue_empty(&tp->out_of_order_queue)) { 32031da177e4SLinus Torvalds tcp_ofo_queue(sk); 32041da177e4SLinus Torvalds 32051da177e4SLinus Torvalds /* RFC2581. 4.2. SHOULD send immediate ACK, when 32061da177e4SLinus Torvalds * gap in queue is filled. 32071da177e4SLinus Torvalds */ 3208b03efcfbSDavid S. Miller if (skb_queue_empty(&tp->out_of_order_queue)) 3209463c84b9SArnaldo Carvalho de Melo inet_csk(sk)->icsk_ack.pingpong = 0; 32101da177e4SLinus Torvalds } 32111da177e4SLinus Torvalds 32121da177e4SLinus Torvalds if (tp->rx_opt.num_sacks) 32131da177e4SLinus Torvalds tcp_sack_remove(tp); 32141da177e4SLinus Torvalds 32151da177e4SLinus Torvalds tcp_fast_path_check(sk, tp); 32161da177e4SLinus Torvalds 32171da177e4SLinus Torvalds if (eaten > 0) 32181da177e4SLinus Torvalds __kfree_skb(skb); 32191da177e4SLinus Torvalds else if (!sock_flag(sk, SOCK_DEAD)) 32201da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 32211da177e4SLinus Torvalds return; 32221da177e4SLinus Torvalds } 32231da177e4SLinus Torvalds 32241da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 32251da177e4SLinus Torvalds /* A retransmit, 2nd most common case. Force an immediate ack. 
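 * The duplicate range is reported back with a D-SACK (RFC 2883) and the
 * segment itself is dropped.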
*/ 32261da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 32271da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 32281da177e4SLinus Torvalds 32291da177e4SLinus Torvalds out_of_window: 3230463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 3231463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 32321da177e4SLinus Torvalds drop: 32331da177e4SLinus Torvalds __kfree_skb(skb); 32341da177e4SLinus Torvalds return; 32351da177e4SLinus Torvalds } 32361da177e4SLinus Torvalds 32371da177e4SLinus Torvalds /* Out of window. F.e. zero window probe. */ 32381da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 32391da177e4SLinus Torvalds goto out_of_window; 32401da177e4SLinus Torvalds 3241463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 32421da177e4SLinus Torvalds 32431da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 32441da177e4SLinus Torvalds /* Partial packet, seq < rcv_next < end_seq */ 32451da177e4SLinus Torvalds SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 32461da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 32471da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq); 32481da177e4SLinus Torvalds 32491da177e4SLinus Torvalds tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 32501da177e4SLinus Torvalds 32511da177e4SLinus Torvalds /* If window is closed, drop tail of packet. But after 32521da177e4SLinus Torvalds * remembering D-SACK for its head made in previous line. 32531da177e4SLinus Torvalds */ 32541da177e4SLinus Torvalds if (!tcp_receive_window(tp)) 32551da177e4SLinus Torvalds goto out_of_window; 32561da177e4SLinus Torvalds goto queue_and_out; 32571da177e4SLinus Torvalds } 32581da177e4SLinus Torvalds 32591da177e4SLinus Torvalds TCP_ECN_check_ce(tp, skb); 32601da177e4SLinus Torvalds 32611da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 32621da177e4SLinus Torvalds !sk_stream_rmem_schedule(sk, skb)) { 32631da177e4SLinus Torvalds if (tcp_prune_queue(sk) < 0 || 32641da177e4SLinus Torvalds !sk_stream_rmem_schedule(sk, skb)) 32651da177e4SLinus Torvalds goto drop; 32661da177e4SLinus Torvalds } 32671da177e4SLinus Torvalds 32681da177e4SLinus Torvalds /* Disable header prediction. */ 32691da177e4SLinus Torvalds tp->pred_flags = 0; 3270463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 32711da177e4SLinus Torvalds 32721da177e4SLinus Torvalds SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 32731da177e4SLinus Torvalds tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 32741da177e4SLinus Torvalds 32751da177e4SLinus Torvalds sk_stream_set_owner_r(skb, sk); 32761da177e4SLinus Torvalds 32771da177e4SLinus Torvalds if (!skb_peek(&tp->out_of_order_queue)) { 32781da177e4SLinus Torvalds /* Initial out of order segment, build 1 SACK. 
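 * The single block covers exactly this segment; later arrivals are
 * merged into it or get their own block via tcp_sack_new_ofo_skb().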
*/ 32791da177e4SLinus Torvalds if (tp->rx_opt.sack_ok) { 32801da177e4SLinus Torvalds tp->rx_opt.num_sacks = 1; 32811da177e4SLinus Torvalds tp->rx_opt.dsack = 0; 32821da177e4SLinus Torvalds tp->rx_opt.eff_sacks = 1; 32831da177e4SLinus Torvalds tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 32841da177e4SLinus Torvalds tp->selective_acks[0].end_seq = 32851da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq; 32861da177e4SLinus Torvalds } 32871da177e4SLinus Torvalds __skb_queue_head(&tp->out_of_order_queue,skb); 32881da177e4SLinus Torvalds } else { 32891da177e4SLinus Torvalds struct sk_buff *skb1 = tp->out_of_order_queue.prev; 32901da177e4SLinus Torvalds u32 seq = TCP_SKB_CB(skb)->seq; 32911da177e4SLinus Torvalds u32 end_seq = TCP_SKB_CB(skb)->end_seq; 32921da177e4SLinus Torvalds 32931da177e4SLinus Torvalds if (seq == TCP_SKB_CB(skb1)->end_seq) { 32948728b834SDavid S. Miller __skb_append(skb1, skb, &tp->out_of_order_queue); 32951da177e4SLinus Torvalds 32961da177e4SLinus Torvalds if (!tp->rx_opt.num_sacks || 32971da177e4SLinus Torvalds tp->selective_acks[0].end_seq != seq) 32981da177e4SLinus Torvalds goto add_sack; 32991da177e4SLinus Torvalds 33001da177e4SLinus Torvalds /* Common case: data arrive in order after hole. */ 33011da177e4SLinus Torvalds tp->selective_acks[0].end_seq = end_seq; 33021da177e4SLinus Torvalds return; 33031da177e4SLinus Torvalds } 33041da177e4SLinus Torvalds 33051da177e4SLinus Torvalds /* Find place to insert this segment. */ 33061da177e4SLinus Torvalds do { 33071da177e4SLinus Torvalds if (!after(TCP_SKB_CB(skb1)->seq, seq)) 33081da177e4SLinus Torvalds break; 33091da177e4SLinus Torvalds } while ((skb1 = skb1->prev) != 33101da177e4SLinus Torvalds (struct sk_buff*)&tp->out_of_order_queue); 33111da177e4SLinus Torvalds 33121da177e4SLinus Torvalds /* Do skb overlap to previous one? */ 33131da177e4SLinus Torvalds if (skb1 != (struct sk_buff*)&tp->out_of_order_queue && 33141da177e4SLinus Torvalds before(seq, TCP_SKB_CB(skb1)->end_seq)) { 33151da177e4SLinus Torvalds if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 33161da177e4SLinus Torvalds /* All the bits are present. Drop. */ 33171da177e4SLinus Torvalds __kfree_skb(skb); 33181da177e4SLinus Torvalds tcp_dsack_set(tp, seq, end_seq); 33191da177e4SLinus Torvalds goto add_sack; 33201da177e4SLinus Torvalds } 33211da177e4SLinus Torvalds if (after(seq, TCP_SKB_CB(skb1)->seq)) { 33221da177e4SLinus Torvalds /* Partial overlap. */ 33231da177e4SLinus Torvalds tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq); 33241da177e4SLinus Torvalds } else { 33251da177e4SLinus Torvalds skb1 = skb1->prev; 33261da177e4SLinus Torvalds } 33271da177e4SLinus Torvalds } 33281da177e4SLinus Torvalds __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 33291da177e4SLinus Torvalds 33301da177e4SLinus Torvalds /* And clean segments covered by new one as whole. */ 33311da177e4SLinus Torvalds while ((skb1 = skb->next) != 33321da177e4SLinus Torvalds (struct sk_buff*)&tp->out_of_order_queue && 33331da177e4SLinus Torvalds after(end_seq, TCP_SKB_CB(skb1)->seq)) { 33341da177e4SLinus Torvalds if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 33351da177e4SLinus Torvalds tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq); 33361da177e4SLinus Torvalds break; 33371da177e4SLinus Torvalds } 33388728b834SDavid S. 
Miller __skb_unlink(skb1, &tp->out_of_order_queue); 33391da177e4SLinus Torvalds tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); 33401da177e4SLinus Torvalds __kfree_skb(skb1); 33411da177e4SLinus Torvalds } 33421da177e4SLinus Torvalds 33431da177e4SLinus Torvalds add_sack: 33441da177e4SLinus Torvalds if (tp->rx_opt.sack_ok) 33451da177e4SLinus Torvalds tcp_sack_new_ofo_skb(sk, seq, end_seq); 33461da177e4SLinus Torvalds } 33471da177e4SLinus Torvalds } 33481da177e4SLinus Torvalds 33491da177e4SLinus Torvalds /* Collapse contiguous sequence of skbs head..tail with 33501da177e4SLinus Torvalds * sequence numbers start..end. 33511da177e4SLinus Torvalds * Segments with FIN/SYN are not collapsed (only because this 33521da177e4SLinus Torvalds * simplifies code) 33531da177e4SLinus Torvalds */ 33541da177e4SLinus Torvalds static void 33558728b834SDavid S. Miller tcp_collapse(struct sock *sk, struct sk_buff_head *list, 33568728b834SDavid S. Miller struct sk_buff *head, struct sk_buff *tail, 33578728b834SDavid S. Miller u32 start, u32 end) 33581da177e4SLinus Torvalds { 33591da177e4SLinus Torvalds struct sk_buff *skb; 33601da177e4SLinus Torvalds 3361caa20d9aSStephen Hemminger /* First, check that queue is collapsible and find 33621da177e4SLinus Torvalds * the point where collapsing can be useful. */ 33631da177e4SLinus Torvalds for (skb = head; skb != tail; ) { 33641da177e4SLinus Torvalds /* No new bits? It is possible on ofo queue. */ 33651da177e4SLinus Torvalds if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 33661da177e4SLinus Torvalds struct sk_buff *next = skb->next; 33678728b834SDavid S. Miller __skb_unlink(skb, list); 33681da177e4SLinus Torvalds __kfree_skb(skb); 33691da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 33701da177e4SLinus Torvalds skb = next; 33711da177e4SLinus Torvalds continue; 33721da177e4SLinus Torvalds } 33731da177e4SLinus Torvalds 33741da177e4SLinus Torvalds /* The first skb to collapse is: 33751da177e4SLinus Torvalds * - not SYN/FIN and 33761da177e4SLinus Torvalds * - bloated or contains data before "start" or 33771da177e4SLinus Torvalds * overlaps to the next one. 33781da177e4SLinus Torvalds */ 33791da177e4SLinus Torvalds if (!skb->h.th->syn && !skb->h.th->fin && 33801da177e4SLinus Torvalds (tcp_win_from_space(skb->truesize) > skb->len || 33811da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->seq, start) || 33821da177e4SLinus Torvalds (skb->next != tail && 33831da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq))) 33841da177e4SLinus Torvalds break; 33851da177e4SLinus Torvalds 33861da177e4SLinus Torvalds /* Decided to skip this, advance start seq. */ 33871da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->end_seq; 33881da177e4SLinus Torvalds skb = skb->next; 33891da177e4SLinus Torvalds } 33901da177e4SLinus Torvalds if (skb == tail || skb->h.th->syn || skb->h.th->fin) 33911da177e4SLinus Torvalds return; 33921da177e4SLinus Torvalds 33931da177e4SLinus Torvalds while (before(start, end)) { 33941da177e4SLinus Torvalds struct sk_buff *nskb; 33951da177e4SLinus Torvalds int header = skb_headroom(skb); 33961da177e4SLinus Torvalds int copy = SKB_MAX_ORDER(header, 0); 33971da177e4SLinus Torvalds 33981da177e4SLinus Torvalds /* Too big header? This can happen with IPv6. 
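 * copy is the payload room left in an order-0 allocation after the
 * headroom; a huge reserved header can push it negative, in which case
 * collapsing is abandoned.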
*/ 33991da177e4SLinus Torvalds if (copy < 0) 34001da177e4SLinus Torvalds return; 34011da177e4SLinus Torvalds if (end-start < copy) 34021da177e4SLinus Torvalds copy = end-start; 34031da177e4SLinus Torvalds nskb = alloc_skb(copy+header, GFP_ATOMIC); 34041da177e4SLinus Torvalds if (!nskb) 34051da177e4SLinus Torvalds return; 34061da177e4SLinus Torvalds skb_reserve(nskb, header); 34071da177e4SLinus Torvalds memcpy(nskb->head, skb->head, header); 34081da177e4SLinus Torvalds nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head); 34091da177e4SLinus Torvalds nskb->h.raw = nskb->head + (skb->h.raw-skb->head); 34101da177e4SLinus Torvalds nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head); 34111da177e4SLinus Torvalds memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 34121da177e4SLinus Torvalds TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 34138728b834SDavid S. Miller __skb_insert(nskb, skb->prev, skb, list); 34141da177e4SLinus Torvalds sk_stream_set_owner_r(nskb, sk); 34151da177e4SLinus Torvalds 34161da177e4SLinus Torvalds /* Copy data, releasing collapsed skbs. */ 34171da177e4SLinus Torvalds while (copy > 0) { 34181da177e4SLinus Torvalds int offset = start - TCP_SKB_CB(skb)->seq; 34191da177e4SLinus Torvalds int size = TCP_SKB_CB(skb)->end_seq - start; 34201da177e4SLinus Torvalds 342109a62660SKris Katterjohn BUG_ON(offset < 0); 34221da177e4SLinus Torvalds if (size > 0) { 34231da177e4SLinus Torvalds size = min(copy, size); 34241da177e4SLinus Torvalds if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 34251da177e4SLinus Torvalds BUG(); 34261da177e4SLinus Torvalds TCP_SKB_CB(nskb)->end_seq += size; 34271da177e4SLinus Torvalds copy -= size; 34281da177e4SLinus Torvalds start += size; 34291da177e4SLinus Torvalds } 34301da177e4SLinus Torvalds if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 34311da177e4SLinus Torvalds struct sk_buff *next = skb->next; 34328728b834SDavid S. Miller __skb_unlink(skb, list); 34331da177e4SLinus Torvalds __kfree_skb(skb); 34341da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED); 34351da177e4SLinus Torvalds skb = next; 34361da177e4SLinus Torvalds if (skb == tail || skb->h.th->syn || skb->h.th->fin) 34371da177e4SLinus Torvalds return; 34381da177e4SLinus Torvalds } 34391da177e4SLinus Torvalds } 34401da177e4SLinus Torvalds } 34411da177e4SLinus Torvalds } 34421da177e4SLinus Torvalds 34431da177e4SLinus Torvalds /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 34441da177e4SLinus Torvalds * and tcp_collapse() them until all the queue is collapsed. 34451da177e4SLinus Torvalds */ 34461da177e4SLinus Torvalds static void tcp_collapse_ofo_queue(struct sock *sk) 34471da177e4SLinus Torvalds { 34481da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 34491da177e4SLinus Torvalds struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 34501da177e4SLinus Torvalds struct sk_buff *head; 34511da177e4SLinus Torvalds u32 start, end; 34521da177e4SLinus Torvalds 34531da177e4SLinus Torvalds if (skb == NULL) 34541da177e4SLinus Torvalds return; 34551da177e4SLinus Torvalds 34561da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 34571da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 34581da177e4SLinus Torvalds head = skb; 34591da177e4SLinus Torvalds 34601da177e4SLinus Torvalds for (;;) { 34611da177e4SLinus Torvalds skb = skb->next; 34621da177e4SLinus Torvalds 34631da177e4SLinus Torvalds /* Segment is terminated when we see gap or when 34641da177e4SLinus Torvalds * we are at the end of all the queue. 
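 * E.g. with ofo blocks [100,200) [150,250) [400,500): the first two
 * form one contiguous run and are collapsed together, then a new run
 * is started at 400.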
*/ 34651da177e4SLinus Torvalds if (skb == (struct sk_buff *)&tp->out_of_order_queue || 34661da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->seq, end) || 34671da177e4SLinus Torvalds before(TCP_SKB_CB(skb)->end_seq, start)) { 34688728b834SDavid S. Miller tcp_collapse(sk, &tp->out_of_order_queue, 34698728b834SDavid S. Miller head, skb, start, end); 34701da177e4SLinus Torvalds head = skb; 34711da177e4SLinus Torvalds if (skb == (struct sk_buff *)&tp->out_of_order_queue) 34721da177e4SLinus Torvalds break; 34731da177e4SLinus Torvalds /* Start new segment */ 34741da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 34751da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 34761da177e4SLinus Torvalds } else { 34771da177e4SLinus Torvalds if (before(TCP_SKB_CB(skb)->seq, start)) 34781da177e4SLinus Torvalds start = TCP_SKB_CB(skb)->seq; 34791da177e4SLinus Torvalds if (after(TCP_SKB_CB(skb)->end_seq, end)) 34801da177e4SLinus Torvalds end = TCP_SKB_CB(skb)->end_seq; 34811da177e4SLinus Torvalds } 34821da177e4SLinus Torvalds } 34831da177e4SLinus Torvalds } 34841da177e4SLinus Torvalds 34851da177e4SLinus Torvalds /* Reduce allocated memory if we can, trying to get 34861da177e4SLinus Torvalds * the socket within its memory limits again. 34871da177e4SLinus Torvalds * 34881da177e4SLinus Torvalds * Return less than zero if we should start dropping frames 34891da177e4SLinus Torvalds * until the socket owning process reads some of the data 34901da177e4SLinus Torvalds * to stabilize the situation. 34911da177e4SLinus Torvalds */ 34921da177e4SLinus Torvalds static int tcp_prune_queue(struct sock *sk) 34931da177e4SLinus Torvalds { 34941da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 34951da177e4SLinus Torvalds 34961da177e4SLinus Torvalds SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 34971da177e4SLinus Torvalds 34981da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED); 34991da177e4SLinus Torvalds 35001da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 35011da177e4SLinus Torvalds tcp_clamp_window(sk, tp); 35021da177e4SLinus Torvalds else if (tcp_memory_pressure) 35031da177e4SLinus Torvalds tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 35041da177e4SLinus Torvalds 35051da177e4SLinus Torvalds tcp_collapse_ofo_queue(sk); 35068728b834SDavid S. Miller tcp_collapse(sk, &sk->sk_receive_queue, 35078728b834SDavid S. Miller sk->sk_receive_queue.next, 35081da177e4SLinus Torvalds (struct sk_buff*)&sk->sk_receive_queue, 35091da177e4SLinus Torvalds tp->copied_seq, tp->rcv_nxt); 35101da177e4SLinus Torvalds sk_stream_mem_reclaim(sk); 35111da177e4SLinus Torvalds 35121da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 35131da177e4SLinus Torvalds return 0; 35141da177e4SLinus Torvalds 35151da177e4SLinus Torvalds /* Collapsing did not help, destructive actions follow. 35161da177e4SLinus Torvalds * This must not ever occur. */ 35171da177e4SLinus Torvalds 35181da177e4SLinus Torvalds /* First, purge the out_of_order queue. */ 3519b03efcfbSDavid S. Miller if (!skb_queue_empty(&tp->out_of_order_queue)) { 3520b03efcfbSDavid S. Miller NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); 35211da177e4SLinus Torvalds __skb_queue_purge(&tp->out_of_order_queue); 35221da177e4SLinus Torvalds 35231da177e4SLinus Torvalds /* Reset SACK state. A conforming SACK implementation will 35241da177e4SLinus Torvalds * do the same at a timeout based retransmit. 
When a connection 35251da177e4SLinus Torvalds * is in a sad state like this, we care only about integrity 35261da177e4SLinus Torvalds * of the connection not performance. 35271da177e4SLinus Torvalds */ 35281da177e4SLinus Torvalds if (tp->rx_opt.sack_ok) 35291da177e4SLinus Torvalds tcp_sack_reset(&tp->rx_opt); 35301da177e4SLinus Torvalds sk_stream_mem_reclaim(sk); 35311da177e4SLinus Torvalds } 35321da177e4SLinus Torvalds 35331da177e4SLinus Torvalds if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 35341da177e4SLinus Torvalds return 0; 35351da177e4SLinus Torvalds 35361da177e4SLinus Torvalds /* If we are really being abused, tell the caller to silently 35371da177e4SLinus Torvalds * drop receive data on the floor. It will get retransmitted 35381da177e4SLinus Torvalds * and hopefully then we'll have sufficient space. 35391da177e4SLinus Torvalds */ 35401da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED); 35411da177e4SLinus Torvalds 35421da177e4SLinus Torvalds /* Massive buffer overcommit. */ 35431da177e4SLinus Torvalds tp->pred_flags = 0; 35441da177e4SLinus Torvalds return -1; 35451da177e4SLinus Torvalds } 35461da177e4SLinus Torvalds 35471da177e4SLinus Torvalds 35481da177e4SLinus Torvalds /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 35491da177e4SLinus Torvalds * As additional protections, we do not touch cwnd in retransmission phases, 35501da177e4SLinus Torvalds * and if application hit its sndbuf limit recently. 35511da177e4SLinus Torvalds */ 35521da177e4SLinus Torvalds void tcp_cwnd_application_limited(struct sock *sk) 35531da177e4SLinus Torvalds { 35541da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 35551da177e4SLinus Torvalds 35566687e988SArnaldo Carvalho de Melo if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 35571da177e4SLinus Torvalds sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 35581da177e4SLinus Torvalds /* Limited by application or receiver window. */ 3559d254bcdbSIlpo Järvinen u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 3560d254bcdbSIlpo Järvinen u32 win_used = max(tp->snd_cwnd_used, init_win); 35611da177e4SLinus Torvalds if (win_used < tp->snd_cwnd) { 35626687e988SArnaldo Carvalho de Melo tp->snd_ssthresh = tcp_current_ssthresh(sk); 35631da177e4SLinus Torvalds tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 35641da177e4SLinus Torvalds } 35651da177e4SLinus Torvalds tp->snd_cwnd_used = 0; 35661da177e4SLinus Torvalds } 35671da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 35681da177e4SLinus Torvalds } 35691da177e4SLinus Torvalds 357040efc6faSStephen Hemminger static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp) 35710d9901dfSDavid S. Miller { 35720d9901dfSDavid S. Miller /* If the user specified a specific send buffer setting, do 35730d9901dfSDavid S. Miller * not modify it. 35740d9901dfSDavid S. Miller */ 35750d9901dfSDavid S. Miller if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 35760d9901dfSDavid S. Miller return 0; 35770d9901dfSDavid S. Miller 35780d9901dfSDavid S. Miller /* If we are under global TCP memory pressure, do not expand. */ 35790d9901dfSDavid S. Miller if (tcp_memory_pressure) 35800d9901dfSDavid S. Miller return 0; 35810d9901dfSDavid S. Miller 35820d9901dfSDavid S. Miller /* If we are under soft global TCP memory pressure, do not expand. */ 35830d9901dfSDavid S. Miller if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 35840d9901dfSDavid S. Miller return 0; 35850d9901dfSDavid S. Miller 35860d9901dfSDavid S. 
Miller /* If we filled the congestion window, do not expand. */ 35870d9901dfSDavid S. Miller if (tp->packets_out >= tp->snd_cwnd) 35880d9901dfSDavid S. Miller return 0; 35890d9901dfSDavid S. Miller 35900d9901dfSDavid S. Miller return 1; 35910d9901dfSDavid S. Miller } 35921da177e4SLinus Torvalds 35931da177e4SLinus Torvalds /* When incoming ACK allowed to free some skb from write_queue, 35941da177e4SLinus Torvalds * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 35951da177e4SLinus Torvalds * on the exit from tcp input handler. 35961da177e4SLinus Torvalds * 35971da177e4SLinus Torvalds * PROBLEM: sndbuf expansion does not work well with largesend. 35981da177e4SLinus Torvalds */ 35991da177e4SLinus Torvalds static void tcp_new_space(struct sock *sk) 36001da177e4SLinus Torvalds { 36011da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 36021da177e4SLinus Torvalds 36030d9901dfSDavid S. Miller if (tcp_should_expand_sndbuf(sk, tp)) { 3604c1b4a7e6SDavid S. Miller int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 36051da177e4SLinus Torvalds MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), 36061da177e4SLinus Torvalds demanded = max_t(unsigned int, tp->snd_cwnd, 36071da177e4SLinus Torvalds tp->reordering + 1); 36081da177e4SLinus Torvalds sndmem *= 2*demanded; 36091da177e4SLinus Torvalds if (sndmem > sk->sk_sndbuf) 36101da177e4SLinus Torvalds sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 36111da177e4SLinus Torvalds tp->snd_cwnd_stamp = tcp_time_stamp; 36121da177e4SLinus Torvalds } 36131da177e4SLinus Torvalds 36141da177e4SLinus Torvalds sk->sk_write_space(sk); 36151da177e4SLinus Torvalds } 36161da177e4SLinus Torvalds 361740efc6faSStephen Hemminger static void tcp_check_space(struct sock *sk) 36181da177e4SLinus Torvalds { 36191da177e4SLinus Torvalds if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 36201da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 36211da177e4SLinus Torvalds if (sk->sk_socket && 36221da177e4SLinus Torvalds test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 36231da177e4SLinus Torvalds tcp_new_space(sk); 36241da177e4SLinus Torvalds } 36251da177e4SLinus Torvalds } 36261da177e4SLinus Torvalds 362740efc6faSStephen Hemminger static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp) 36281da177e4SLinus Torvalds { 362955c97f3eSDavid S. Miller tcp_push_pending_frames(sk, tp); 36301da177e4SLinus Torvalds tcp_check_space(sk); 36311da177e4SLinus Torvalds } 36321da177e4SLinus Torvalds 36331da177e4SLinus Torvalds /* 36341da177e4SLinus Torvalds * Check if sending an ack is needed. 36351da177e4SLinus Torvalds */ 36361da177e4SLinus Torvalds static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 36371da177e4SLinus Torvalds { 36381da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 36391da177e4SLinus Torvalds 36401da177e4SLinus Torvalds /* More than one full frame received... */ 3641463c84b9SArnaldo Carvalho de Melo if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss 36421da177e4SLinus Torvalds /* ... and right edge of window advances far enough. 36431da177e4SLinus Torvalds * (tcp_recvmsg() will send ACK otherwise). Or... 36441da177e4SLinus Torvalds */ 36451da177e4SLinus Torvalds && __tcp_select_window(sk) >= tp->rcv_wnd) || 36461da177e4SLinus Torvalds /* We ACK each frame or... */ 3647463c84b9SArnaldo Carvalho de Melo tcp_in_quickack_mode(sk) || 36481da177e4SLinus Torvalds /* We have out of order data. 
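 * (an immediate dupack carrying fresh SACK info lets the sender detect
 * the hole and fast-retransmit quickly)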
*/ 36491da177e4SLinus Torvalds (ofo_possible && 36501da177e4SLinus Torvalds skb_peek(&tp->out_of_order_queue))) { 36511da177e4SLinus Torvalds /* Then ack it now */ 36521da177e4SLinus Torvalds tcp_send_ack(sk); 36531da177e4SLinus Torvalds } else { 36541da177e4SLinus Torvalds /* Else, send delayed ack. */ 36551da177e4SLinus Torvalds tcp_send_delayed_ack(sk); 36561da177e4SLinus Torvalds } 36571da177e4SLinus Torvalds } 36581da177e4SLinus Torvalds 365940efc6faSStephen Hemminger static inline void tcp_ack_snd_check(struct sock *sk) 36601da177e4SLinus Torvalds { 3661463c84b9SArnaldo Carvalho de Melo if (!inet_csk_ack_scheduled(sk)) { 36621da177e4SLinus Torvalds /* We sent a data segment already. */ 36631da177e4SLinus Torvalds return; 36641da177e4SLinus Torvalds } 36651da177e4SLinus Torvalds __tcp_ack_snd_check(sk, 1); 36661da177e4SLinus Torvalds } 36671da177e4SLinus Torvalds 36681da177e4SLinus Torvalds /* 36691da177e4SLinus Torvalds * This routine is only called when we have urgent data 3670caa20d9aSStephen Hemminger * signaled. Its the 'slow' part of tcp_urg. It could be 36711da177e4SLinus Torvalds * moved inline now as tcp_urg is only called from one 36721da177e4SLinus Torvalds * place. We handle URGent data wrong. We have to - as 36731da177e4SLinus Torvalds * BSD still doesn't use the correction from RFC961. 36741da177e4SLinus Torvalds * For 1003.1g we should support a new option TCP_STDURG to permit 36751da177e4SLinus Torvalds * either form (or just set the sysctl tcp_stdurg). 36761da177e4SLinus Torvalds */ 36771da177e4SLinus Torvalds 36781da177e4SLinus Torvalds static void tcp_check_urg(struct sock * sk, struct tcphdr * th) 36791da177e4SLinus Torvalds { 36801da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 36811da177e4SLinus Torvalds u32 ptr = ntohs(th->urg_ptr); 36821da177e4SLinus Torvalds 36831da177e4SLinus Torvalds if (ptr && !sysctl_tcp_stdurg) 36841da177e4SLinus Torvalds ptr--; 36851da177e4SLinus Torvalds ptr += ntohl(th->seq); 36861da177e4SLinus Torvalds 36871da177e4SLinus Torvalds /* Ignore urgent data that we've already seen and read. */ 36881da177e4SLinus Torvalds if (after(tp->copied_seq, ptr)) 36891da177e4SLinus Torvalds return; 36901da177e4SLinus Torvalds 36911da177e4SLinus Torvalds /* Do not replay urg ptr. 36921da177e4SLinus Torvalds * 36931da177e4SLinus Torvalds * NOTE: interesting situation not covered by specs. 36941da177e4SLinus Torvalds * Misbehaving sender may send urg ptr, pointing to segment, 36951da177e4SLinus Torvalds * which we already have in ofo queue. We are not able to fetch 36961da177e4SLinus Torvalds * such data and will stay in TCP_URG_NOTYET until will be eaten 36971da177e4SLinus Torvalds * by recvmsg(). Seems, we are not obliged to handle such wicked 36981da177e4SLinus Torvalds * situations. But it is worth to think about possibility of some 36991da177e4SLinus Torvalds * DoSes using some hypothetical application level deadlock. 37001da177e4SLinus Torvalds */ 37011da177e4SLinus Torvalds if (before(ptr, tp->rcv_nxt)) 37021da177e4SLinus Torvalds return; 37031da177e4SLinus Torvalds 37041da177e4SLinus Torvalds /* Do we already have a newer (or duplicate) urgent pointer? */ 37051da177e4SLinus Torvalds if (tp->urg_data && !after(ptr, tp->urg_seq)) 37061da177e4SLinus Torvalds return; 37071da177e4SLinus Torvalds 37081da177e4SLinus Torvalds /* Tell the world about our new urgent pointer. 
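 * sk_send_sigurg() raises SIGURG for the owning process and lets
 * poll/select report an exceptional condition.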
*/ 37091da177e4SLinus Torvalds sk_send_sigurg(sk); 37101da177e4SLinus Torvalds 37111da177e4SLinus Torvalds /* We may be adding urgent data when the last byte read was 37121da177e4SLinus Torvalds * urgent. To do this requires some care. We cannot just ignore 37131da177e4SLinus Torvalds * tp->copied_seq since we would read the last urgent byte again 37141da177e4SLinus Torvalds * as data, nor can we alter copied_seq until this data arrives 3715caa20d9aSStephen Hemminger * or we break the semantics of SIOCATMARK (and thus sockatmark()) 37161da177e4SLinus Torvalds * 37171da177e4SLinus Torvalds * NOTE. Double Dutch. Rendering to plain English: author of comment 37181da177e4SLinus Torvalds * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 37191da177e4SLinus Torvalds * and expect that both A and B disappear from stream. This is _wrong_. 37201da177e4SLinus Torvalds * Though this happens in BSD with high probability, this is occasional. 37211da177e4SLinus Torvalds * Any application relying on this is buggy. Note also, that fix "works" 37221da177e4SLinus Torvalds * only in this artificial test. Insert some normal data between A and B and we will 37231da177e4SLinus Torvalds * decline of BSD again. Verdict: it is better to remove to trap 37241da177e4SLinus Torvalds * buggy users. 37251da177e4SLinus Torvalds */ 37261da177e4SLinus Torvalds if (tp->urg_seq == tp->copied_seq && tp->urg_data && 37271da177e4SLinus Torvalds !sock_flag(sk, SOCK_URGINLINE) && 37281da177e4SLinus Torvalds tp->copied_seq != tp->rcv_nxt) { 37291da177e4SLinus Torvalds struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 37301da177e4SLinus Torvalds tp->copied_seq++; 37311da177e4SLinus Torvalds if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 37328728b834SDavid S. Miller __skb_unlink(skb, &sk->sk_receive_queue); 37331da177e4SLinus Torvalds __kfree_skb(skb); 37341da177e4SLinus Torvalds } 37351da177e4SLinus Torvalds } 37361da177e4SLinus Torvalds 37371da177e4SLinus Torvalds tp->urg_data = TCP_URG_NOTYET; 37381da177e4SLinus Torvalds tp->urg_seq = ptr; 37391da177e4SLinus Torvalds 37401da177e4SLinus Torvalds /* Disable header prediction. */ 37411da177e4SLinus Torvalds tp->pred_flags = 0; 37421da177e4SLinus Torvalds } 37431da177e4SLinus Torvalds 37441da177e4SLinus Torvalds /* This is the 'fast' part of urgent handling. */ 37451da177e4SLinus Torvalds static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) 37461da177e4SLinus Torvalds { 37471da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 37481da177e4SLinus Torvalds 37491da177e4SLinus Torvalds /* Check if we get a new urgent pointer - normally not. */ 37501da177e4SLinus Torvalds if (th->urg) 37511da177e4SLinus Torvalds tcp_check_urg(sk,th); 37521da177e4SLinus Torvalds 37531da177e4SLinus Torvalds /* Do we wait for any urgent data? - normally not... */ 37541da177e4SLinus Torvalds if (tp->urg_data == TCP_URG_NOTYET) { 37551da177e4SLinus Torvalds u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 37561da177e4SLinus Torvalds th->syn; 37571da177e4SLinus Torvalds 37581da177e4SLinus Torvalds /* Is the urgent pointer pointing into this packet? 
*/ 37591da177e4SLinus Torvalds if (ptr < skb->len) { 37601da177e4SLinus Torvalds u8 tmp; 37611da177e4SLinus Torvalds if (skb_copy_bits(skb, ptr, &tmp, 1)) 37621da177e4SLinus Torvalds BUG(); 37631da177e4SLinus Torvalds tp->urg_data = TCP_URG_VALID | tmp; 37641da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 37651da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 37661da177e4SLinus Torvalds } 37671da177e4SLinus Torvalds } 37681da177e4SLinus Torvalds } 37691da177e4SLinus Torvalds 37701da177e4SLinus Torvalds static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 37711da177e4SLinus Torvalds { 37721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 37731da177e4SLinus Torvalds int chunk = skb->len - hlen; 37741da177e4SLinus Torvalds int err; 37751da177e4SLinus Torvalds 37761da177e4SLinus Torvalds local_bh_enable(); 37771da177e4SLinus Torvalds if (skb->ip_summed==CHECKSUM_UNNECESSARY) 37781da177e4SLinus Torvalds err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); 37791da177e4SLinus Torvalds else 37801da177e4SLinus Torvalds err = skb_copy_and_csum_datagram_iovec(skb, hlen, 37811da177e4SLinus Torvalds tp->ucopy.iov); 37821da177e4SLinus Torvalds 37831da177e4SLinus Torvalds if (!err) { 37841da177e4SLinus Torvalds tp->ucopy.len -= chunk; 37851da177e4SLinus Torvalds tp->copied_seq += chunk; 37861da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 37871da177e4SLinus Torvalds } 37881da177e4SLinus Torvalds 37891da177e4SLinus Torvalds local_bh_disable(); 37901da177e4SLinus Torvalds return err; 37911da177e4SLinus Torvalds } 37921da177e4SLinus Torvalds 3793*b51655b9SAl Viro static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) 37941da177e4SLinus Torvalds { 3795*b51655b9SAl Viro __sum16 result; 37961da177e4SLinus Torvalds 37971da177e4SLinus Torvalds if (sock_owned_by_user(sk)) { 37981da177e4SLinus Torvalds local_bh_enable(); 37991da177e4SLinus Torvalds result = __tcp_checksum_complete(skb); 38001da177e4SLinus Torvalds local_bh_disable(); 38011da177e4SLinus Torvalds } else { 38021da177e4SLinus Torvalds result = __tcp_checksum_complete(skb); 38031da177e4SLinus Torvalds } 38041da177e4SLinus Torvalds return result; 38051da177e4SLinus Torvalds } 38061da177e4SLinus Torvalds 380740efc6faSStephen Hemminger static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) 38081da177e4SLinus Torvalds { 38091da177e4SLinus Torvalds return skb->ip_summed != CHECKSUM_UNNECESSARY && 38101da177e4SLinus Torvalds __tcp_checksum_complete_user(sk, skb); 38111da177e4SLinus Torvalds } 38121da177e4SLinus Torvalds 38131a2449a8SChris Leech #ifdef CONFIG_NET_DMA 38141a2449a8SChris Leech static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) 38151a2449a8SChris Leech { 38161a2449a8SChris Leech struct tcp_sock *tp = tcp_sk(sk); 38171a2449a8SChris Leech int chunk = skb->len - hlen; 38181a2449a8SChris Leech int dma_cookie; 38191a2449a8SChris Leech int copied_early = 0; 38201a2449a8SChris Leech 38211a2449a8SChris Leech if (tp->ucopy.wakeup) 38221a2449a8SChris Leech return 0; 38231a2449a8SChris Leech 38241a2449a8SChris Leech if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 38251a2449a8SChris Leech tp->ucopy.dma_chan = get_softnet_dma(); 38261a2449a8SChris Leech 38271a2449a8SChris Leech if (tp->ucopy.dma_chan && skb->ip_summed == CHECKSUM_UNNECESSARY) { 38281a2449a8SChris Leech 38291a2449a8SChris Leech dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, 38301a2449a8SChris Leech skb, hlen, tp->ucopy.iov, chunk, 
tp->ucopy.pinned_list); 38311a2449a8SChris Leech 38321a2449a8SChris Leech if (dma_cookie < 0) 38331a2449a8SChris Leech goto out; 38341a2449a8SChris Leech 38351a2449a8SChris Leech tp->ucopy.dma_cookie = dma_cookie; 38361a2449a8SChris Leech copied_early = 1; 38371a2449a8SChris Leech 38381a2449a8SChris Leech tp->ucopy.len -= chunk; 38391a2449a8SChris Leech tp->copied_seq += chunk; 38401a2449a8SChris Leech tcp_rcv_space_adjust(sk); 38411a2449a8SChris Leech 38421a2449a8SChris Leech if ((tp->ucopy.len == 0) || 38431a2449a8SChris Leech (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) || 38441a2449a8SChris Leech (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 38451a2449a8SChris Leech tp->ucopy.wakeup = 1; 38461a2449a8SChris Leech sk->sk_data_ready(sk, 0); 38471a2449a8SChris Leech } 38481a2449a8SChris Leech } else if (chunk > 0) { 38491a2449a8SChris Leech tp->ucopy.wakeup = 1; 38501a2449a8SChris Leech sk->sk_data_ready(sk, 0); 38511a2449a8SChris Leech } 38521a2449a8SChris Leech out: 38531a2449a8SChris Leech return copied_early; 38541a2449a8SChris Leech } 38551a2449a8SChris Leech #endif /* CONFIG_NET_DMA */ 38561a2449a8SChris Leech 38571da177e4SLinus Torvalds /* 38581da177e4SLinus Torvalds * TCP receive function for the ESTABLISHED state. 38591da177e4SLinus Torvalds * 38601da177e4SLinus Torvalds * It is split into a fast path and a slow path. The fast path is 38611da177e4SLinus Torvalds * disabled when: 38621da177e4SLinus Torvalds * - A zero window was announced from us - zero window probing 38631da177e4SLinus Torvalds * is only handled properly in the slow path. 38641da177e4SLinus Torvalds * - Out of order segments arrived. 38651da177e4SLinus Torvalds * - Urgent data is expected. 38661da177e4SLinus Torvalds * - There is no buffer space left 38671da177e4SLinus Torvalds * - Unexpected TCP flags/window values/header lengths are received 38681da177e4SLinus Torvalds * (detected by checking the TCP header against pred_flags) 38691da177e4SLinus Torvalds * - Data is sent in both directions. Fast path only supports pure senders 38701da177e4SLinus Torvalds * or pure receivers (this means either the sequence number or the ack 38711da177e4SLinus Torvalds * value must stay constant) 38721da177e4SLinus Torvalds * - Unexpected TCP option. 38731da177e4SLinus Torvalds * 38741da177e4SLinus Torvalds * When these conditions are not satisfied it drops into a standard 38751da177e4SLinus Torvalds * receive procedure patterned after RFC793 to handle all cases. 38761da177e4SLinus Torvalds * The first three cases are guaranteed by proper pred_flags setting, 38771da177e4SLinus Torvalds * the rest is checked inline. Fast processing is turned on in 38781da177e4SLinus Torvalds * tcp_data_queue when everything is OK. 38791da177e4SLinus Torvalds */ 38801da177e4SLinus Torvalds int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 38811da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 38821da177e4SLinus Torvalds { 38831da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 38841da177e4SLinus Torvalds 38851da177e4SLinus Torvalds /* 38861da177e4SLinus Torvalds * Header prediction. 38871da177e4SLinus Torvalds * The code loosely follows the one in the famous 38881da177e4SLinus Torvalds * "30 instruction TCP receive" Van Jacobson mail. 
38891da177e4SLinus Torvalds * 38901da177e4SLinus Torvalds * Van's trick is to deposit buffers into socket queue 38911da177e4SLinus Torvalds * on a device interrupt, to call tcp_recv function 38921da177e4SLinus Torvalds * on the receive process context and checksum and copy 38931da177e4SLinus Torvalds * the buffer to user space. smart... 38941da177e4SLinus Torvalds * 38951da177e4SLinus Torvalds * Our current scheme is not silly either but we take the 38961da177e4SLinus Torvalds * extra cost of the net_bh soft interrupt processing... 38971da177e4SLinus Torvalds * We do checksum and copy also but from device to kernel. 38981da177e4SLinus Torvalds */ 38991da177e4SLinus Torvalds 39001da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 39011da177e4SLinus Torvalds 39021da177e4SLinus Torvalds /* pred_flags is 0xS?10 << 16 + snd_wnd 3903caa20d9aSStephen Hemminger * if header_prediction is to be made 39041da177e4SLinus Torvalds * 'S' will always be tp->tcp_header_len >> 2 39051da177e4SLinus Torvalds * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 39061da177e4SLinus Torvalds * turn it off (when there are holes in the receive 39071da177e4SLinus Torvalds * space for instance) 39081da177e4SLinus Torvalds * PSH flag is ignored. 39091da177e4SLinus Torvalds */ 39101da177e4SLinus Torvalds 39111da177e4SLinus Torvalds if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 39121da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 39131da177e4SLinus Torvalds int tcp_header_len = tp->tcp_header_len; 39141da177e4SLinus Torvalds 39151da177e4SLinus Torvalds /* Timestamp header prediction: tcp_header_len 39161da177e4SLinus Torvalds * is automatically equal to th->doff*4 due to pred_flags 39171da177e4SLinus Torvalds * match. 39181da177e4SLinus Torvalds */ 39191da177e4SLinus Torvalds 39201da177e4SLinus Torvalds /* Check timestamp */ 39211da177e4SLinus Torvalds if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 39224f3608b7SAl Viro __be32 *ptr = (__be32 *)(th + 1); 39231da177e4SLinus Torvalds 39241da177e4SLinus Torvalds /* No? Slow path! */ 39254f3608b7SAl Viro if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 39261da177e4SLinus Torvalds | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) 39271da177e4SLinus Torvalds goto slow_path; 39281da177e4SLinus Torvalds 39291da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 1; 39301da177e4SLinus Torvalds ++ptr; 39311da177e4SLinus Torvalds tp->rx_opt.rcv_tsval = ntohl(*ptr); 39321da177e4SLinus Torvalds ++ptr; 39331da177e4SLinus Torvalds tp->rx_opt.rcv_tsecr = ntohl(*ptr); 39341da177e4SLinus Torvalds 39351da177e4SLinus Torvalds /* If PAWS failed, check it more carefully in slow path */ 39361da177e4SLinus Torvalds if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 39371da177e4SLinus Torvalds goto slow_path; 39381da177e4SLinus Torvalds 39391da177e4SLinus Torvalds /* DO NOT update ts_recent here, if checksum fails 39401da177e4SLinus Torvalds * and timestamp was corrupted part, it will result 39411da177e4SLinus Torvalds * in a hung connection since we will drop all 39421da177e4SLinus Torvalds * future packets due to the PAWS test. 39431da177e4SLinus Torvalds */ 39441da177e4SLinus Torvalds } 39451da177e4SLinus Torvalds 39461da177e4SLinus Torvalds if (len <= tcp_header_len) { 39471da177e4SLinus Torvalds /* Bulk data transfer: sender */ 39481da177e4SLinus Torvalds if (len == tcp_header_len) { 39491da177e4SLinus Torvalds /* Predicted packet is in window by definition. 
39501da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 39511da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 39521da177e4SLinus Torvalds */ 39531da177e4SLinus Torvalds if (tcp_header_len == 39541da177e4SLinus Torvalds (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 39551da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 39561da177e4SLinus Torvalds tcp_store_ts_recent(tp); 39571da177e4SLinus Torvalds 39581da177e4SLinus Torvalds /* We know that such packets are checksummed 39591da177e4SLinus Torvalds * on entry. 39601da177e4SLinus Torvalds */ 39611da177e4SLinus Torvalds tcp_ack(sk, skb, 0); 39621da177e4SLinus Torvalds __kfree_skb(skb); 396355c97f3eSDavid S. Miller tcp_data_snd_check(sk, tp); 39641da177e4SLinus Torvalds return 0; 39651da177e4SLinus Torvalds } else { /* Header too small */ 39661da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 39671da177e4SLinus Torvalds goto discard; 39681da177e4SLinus Torvalds } 39691da177e4SLinus Torvalds } else { 39701da177e4SLinus Torvalds int eaten = 0; 39711a2449a8SChris Leech int copied_early = 0; 39721da177e4SLinus Torvalds 39731a2449a8SChris Leech if (tp->copied_seq == tp->rcv_nxt && 39741a2449a8SChris Leech len - tcp_header_len <= tp->ucopy.len) { 39751a2449a8SChris Leech #ifdef CONFIG_NET_DMA 39761a2449a8SChris Leech if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 39771a2449a8SChris Leech copied_early = 1; 39781a2449a8SChris Leech eaten = 1; 39791a2449a8SChris Leech } 39801a2449a8SChris Leech #endif 39811a2449a8SChris Leech if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) { 39821da177e4SLinus Torvalds __set_current_state(TASK_RUNNING); 39831da177e4SLinus Torvalds 39841a2449a8SChris Leech if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 39851a2449a8SChris Leech eaten = 1; 39861a2449a8SChris Leech } 39871a2449a8SChris Leech if (eaten) { 39881da177e4SLinus Torvalds /* Predicted packet is in window by definition. 39891da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 39901da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 39911da177e4SLinus Torvalds */ 39921da177e4SLinus Torvalds if (tcp_header_len == 39931da177e4SLinus Torvalds (sizeof(struct tcphdr) + 39941da177e4SLinus Torvalds TCPOLEN_TSTAMP_ALIGNED) && 39951da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 39961da177e4SLinus Torvalds tcp_store_ts_recent(tp); 39971da177e4SLinus Torvalds 3998463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 39991da177e4SLinus Torvalds 40001da177e4SLinus Torvalds __skb_pull(skb, tcp_header_len); 40011da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 40021da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER); 40031da177e4SLinus Torvalds } 40041a2449a8SChris Leech if (copied_early) 40051a2449a8SChris Leech tcp_cleanup_rbuf(sk, skb->len); 40061da177e4SLinus Torvalds } 40071da177e4SLinus Torvalds if (!eaten) { 40081da177e4SLinus Torvalds if (tcp_checksum_complete_user(sk, skb)) 40091da177e4SLinus Torvalds goto csum_error; 40101da177e4SLinus Torvalds 40111da177e4SLinus Torvalds /* Predicted packet is in window by definition. 40121da177e4SLinus Torvalds * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
40131da177e4SLinus Torvalds * Hence, check seq<=rcv_wup reduces to: 40141da177e4SLinus Torvalds */ 40151da177e4SLinus Torvalds if (tcp_header_len == 40161da177e4SLinus Torvalds (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 40171da177e4SLinus Torvalds tp->rcv_nxt == tp->rcv_wup) 40181da177e4SLinus Torvalds tcp_store_ts_recent(tp); 40191da177e4SLinus Torvalds 4020463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 40211da177e4SLinus Torvalds 40221da177e4SLinus Torvalds if ((int)skb->truesize > sk->sk_forward_alloc) 40231da177e4SLinus Torvalds goto step5; 40241da177e4SLinus Torvalds 40251da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS); 40261da177e4SLinus Torvalds 40271da177e4SLinus Torvalds /* Bulk data transfer: receiver */ 40281da177e4SLinus Torvalds __skb_pull(skb,tcp_header_len); 40291da177e4SLinus Torvalds __skb_queue_tail(&sk->sk_receive_queue, skb); 40301da177e4SLinus Torvalds sk_stream_set_owner_r(skb, sk); 40311da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 40321da177e4SLinus Torvalds } 40331da177e4SLinus Torvalds 40341da177e4SLinus Torvalds tcp_event_data_recv(sk, tp, skb); 40351da177e4SLinus Torvalds 40361da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 40371da177e4SLinus Torvalds /* Well, only one small jumplet in fast path... */ 40381da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_DATA); 403955c97f3eSDavid S. Miller tcp_data_snd_check(sk, tp); 4040463c84b9SArnaldo Carvalho de Melo if (!inet_csk_ack_scheduled(sk)) 40411da177e4SLinus Torvalds goto no_ack; 40421da177e4SLinus Torvalds } 40431da177e4SLinus Torvalds 40441da177e4SLinus Torvalds __tcp_ack_snd_check(sk, 0); 40451da177e4SLinus Torvalds no_ack: 40461a2449a8SChris Leech #ifdef CONFIG_NET_DMA 40471a2449a8SChris Leech if (copied_early) 40481a2449a8SChris Leech __skb_queue_tail(&sk->sk_async_wait_queue, skb); 40491a2449a8SChris Leech else 40501a2449a8SChris Leech #endif 40511da177e4SLinus Torvalds if (eaten) 40521da177e4SLinus Torvalds __kfree_skb(skb); 40531da177e4SLinus Torvalds else 40541da177e4SLinus Torvalds sk->sk_data_ready(sk, 0); 40551da177e4SLinus Torvalds return 0; 40561da177e4SLinus Torvalds } 40571da177e4SLinus Torvalds } 40581da177e4SLinus Torvalds 40591da177e4SLinus Torvalds slow_path: 40601da177e4SLinus Torvalds if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb)) 40611da177e4SLinus Torvalds goto csum_error; 40621da177e4SLinus Torvalds 40631da177e4SLinus Torvalds /* 40641da177e4SLinus Torvalds * RFC1323: H1. Apply PAWS check first. 40651da177e4SLinus Torvalds */ 40661da177e4SLinus Torvalds if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4067463c84b9SArnaldo Carvalho de Melo tcp_paws_discard(sk, skb)) { 40681da177e4SLinus Torvalds if (!th->rst) { 40691da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 40701da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 40711da177e4SLinus Torvalds goto discard; 40721da177e4SLinus Torvalds } 40731da177e4SLinus Torvalds /* Resets are accepted even if PAWS failed. 40741da177e4SLinus Torvalds 40751da177e4SLinus Torvalds ts_recent update must be made after we are sure 40761da177e4SLinus Torvalds that the packet is in window. 40771da177e4SLinus Torvalds */ 40781da177e4SLinus Torvalds } 40791da177e4SLinus Torvalds 40801da177e4SLinus Torvalds /* 40811da177e4SLinus Torvalds * Standard slow path. 
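 * From here the segment goes through the RFC 793 style checks:
 * sequence validity, RST, ts_recent update, in-window SYN, then ACK,
 * urgent data and the data queue itself.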
40821da177e4SLinus Torvalds */ 40831da177e4SLinus Torvalds 40841da177e4SLinus Torvalds if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 40851da177e4SLinus Torvalds /* RFC793, page 37: "In all states except SYN-SENT, all reset 40861da177e4SLinus Torvalds * (RST) segments are validated by checking their SEQ-fields." 40871da177e4SLinus Torvalds * And page 69: "If an incoming segment is not acceptable, 40881da177e4SLinus Torvalds * an acknowledgment should be sent in reply (unless the RST bit 40891da177e4SLinus Torvalds * is set, if so drop the segment and return)". 40901da177e4SLinus Torvalds */ 40911da177e4SLinus Torvalds if (!th->rst) 40921da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 40931da177e4SLinus Torvalds goto discard; 40941da177e4SLinus Torvalds } 40951da177e4SLinus Torvalds 40961da177e4SLinus Torvalds if(th->rst) { 40971da177e4SLinus Torvalds tcp_reset(sk); 40981da177e4SLinus Torvalds goto discard; 40991da177e4SLinus Torvalds } 41001da177e4SLinus Torvalds 41011da177e4SLinus Torvalds tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 41021da177e4SLinus Torvalds 41031da177e4SLinus Torvalds if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 41041da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 41051da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 41061da177e4SLinus Torvalds tcp_reset(sk); 41071da177e4SLinus Torvalds return 1; 41081da177e4SLinus Torvalds } 41091da177e4SLinus Torvalds 41101da177e4SLinus Torvalds step5: 41111da177e4SLinus Torvalds if(th->ack) 41121da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_SLOWPATH); 41131da177e4SLinus Torvalds 4114463c84b9SArnaldo Carvalho de Melo tcp_rcv_rtt_measure_ts(sk, skb); 41151da177e4SLinus Torvalds 41161da177e4SLinus Torvalds /* Process urgent data. */ 41171da177e4SLinus Torvalds tcp_urg(sk, skb, th); 41181da177e4SLinus Torvalds 41191da177e4SLinus Torvalds /* step 7: process the segment text */ 41201da177e4SLinus Torvalds tcp_data_queue(sk, skb); 41211da177e4SLinus Torvalds 412255c97f3eSDavid S. 
Miller tcp_data_snd_check(sk, tp); 41231da177e4SLinus Torvalds tcp_ack_snd_check(sk); 41241da177e4SLinus Torvalds return 0; 41251da177e4SLinus Torvalds 41261da177e4SLinus Torvalds csum_error: 41271da177e4SLinus Torvalds TCP_INC_STATS_BH(TCP_MIB_INERRS); 41281da177e4SLinus Torvalds 41291da177e4SLinus Torvalds discard: 41301da177e4SLinus Torvalds __kfree_skb(skb); 41311da177e4SLinus Torvalds return 0; 41321da177e4SLinus Torvalds } 41331da177e4SLinus Torvalds 41341da177e4SLinus Torvalds static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 41351da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 41361da177e4SLinus Torvalds { 41371da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 4138d83d8461SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 41391da177e4SLinus Torvalds int saved_clamp = tp->rx_opt.mss_clamp; 41401da177e4SLinus Torvalds 41411da177e4SLinus Torvalds tcp_parse_options(skb, &tp->rx_opt, 0); 41421da177e4SLinus Torvalds 41431da177e4SLinus Torvalds if (th->ack) { 41441da177e4SLinus Torvalds /* rfc793: 41451da177e4SLinus Torvalds * "If the state is SYN-SENT then 41461da177e4SLinus Torvalds * first check the ACK bit 41471da177e4SLinus Torvalds * If the ACK bit is set 41481da177e4SLinus Torvalds * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 41491da177e4SLinus Torvalds * a reset (unless the RST bit is set, if so drop 41501da177e4SLinus Torvalds * the segment and return)" 41511da177e4SLinus Torvalds * 41521da177e4SLinus Torvalds * We do not send data with SYN, so that RFC-correct 41531da177e4SLinus Torvalds * test reduces to: 41541da177e4SLinus Torvalds */ 41551da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) 41561da177e4SLinus Torvalds goto reset_and_undo; 41571da177e4SLinus Torvalds 41581da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 41591da177e4SLinus Torvalds !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 41601da177e4SLinus Torvalds tcp_time_stamp)) { 41611da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED); 41621da177e4SLinus Torvalds goto reset_and_undo; 41631da177e4SLinus Torvalds } 41641da177e4SLinus Torvalds 41651da177e4SLinus Torvalds /* Now ACK is acceptable. 41661da177e4SLinus Torvalds * 41671da177e4SLinus Torvalds * "If the RST bit is set 41681da177e4SLinus Torvalds * If the ACK was acceptable then signal the user "error: 41691da177e4SLinus Torvalds * connection reset", drop the segment, enter CLOSED state, 41701da177e4SLinus Torvalds * delete TCB, and return." 41711da177e4SLinus Torvalds */ 41721da177e4SLinus Torvalds 41731da177e4SLinus Torvalds if (th->rst) { 41741da177e4SLinus Torvalds tcp_reset(sk); 41751da177e4SLinus Torvalds goto discard; 41761da177e4SLinus Torvalds } 41771da177e4SLinus Torvalds 41781da177e4SLinus Torvalds /* rfc793: 41791da177e4SLinus Torvalds * "fifth, if neither of the SYN or RST bits is set then 41801da177e4SLinus Torvalds * drop the segment and return." 41811da177e4SLinus Torvalds * 41821da177e4SLinus Torvalds * See note below! 41831da177e4SLinus Torvalds * --ANK(990513) 41841da177e4SLinus Torvalds */ 41851da177e4SLinus Torvalds if (!th->syn) 41861da177e4SLinus Torvalds goto discard_and_undo; 41871da177e4SLinus Torvalds 41881da177e4SLinus Torvalds /* rfc793: 41891da177e4SLinus Torvalds * "If the SYN bit is on ... 41901da177e4SLinus Torvalds * are acceptable then ... 41911da177e4SLinus Torvalds * (our SYN has been ACKed), change the connection 41921da177e4SLinus Torvalds * state to ESTABLISHED..." 
41931da177e4SLinus Torvalds */ 41941da177e4SLinus Torvalds 41951da177e4SLinus Torvalds TCP_ECN_rcv_synack(tp, th); 41961da177e4SLinus Torvalds 41971da177e4SLinus Torvalds tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 41981da177e4SLinus Torvalds tcp_ack(sk, skb, FLAG_SLOWPATH); 41991da177e4SLinus Torvalds 42001da177e4SLinus Torvalds /* Ok.. it's good. Set up sequence numbers and 42011da177e4SLinus Torvalds * move to established. 42021da177e4SLinus Torvalds */ 42031da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 42041da177e4SLinus Torvalds tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 42051da177e4SLinus Torvalds 42061da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is 42071da177e4SLinus Torvalds * never scaled. 42081da177e4SLinus Torvalds */ 42091da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window); 42101da177e4SLinus Torvalds tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq); 42111da177e4SLinus Torvalds 42121da177e4SLinus Torvalds if (!tp->rx_opt.wscale_ok) { 42131da177e4SLinus Torvalds tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 42141da177e4SLinus Torvalds tp->window_clamp = min(tp->window_clamp, 65535U); 42151da177e4SLinus Torvalds } 42161da177e4SLinus Torvalds 42171da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp) { 42181da177e4SLinus Torvalds tp->rx_opt.tstamp_ok = 1; 42191da177e4SLinus Torvalds tp->tcp_header_len = 42201da177e4SLinus Torvalds sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 42211da177e4SLinus Torvalds tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 42221da177e4SLinus Torvalds tcp_store_ts_recent(tp); 42231da177e4SLinus Torvalds } else { 42241da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr); 42251da177e4SLinus Torvalds } 42261da177e4SLinus Torvalds 42271da177e4SLinus Torvalds if (tp->rx_opt.sack_ok && sysctl_tcp_fack) 42281da177e4SLinus Torvalds tp->rx_opt.sack_ok |= 2; 42291da177e4SLinus Torvalds 42305d424d5aSJohn Heffner tcp_mtup_init(sk); 4231d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 42321da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 42331da177e4SLinus Torvalds 42341da177e4SLinus Torvalds /* Remember, tcp_poll() does not lock socket! 42351da177e4SLinus Torvalds * Change state from SYN-SENT only after copied_seq 42361da177e4SLinus Torvalds * is initialized. */ 42371da177e4SLinus Torvalds tp->copied_seq = tp->rcv_nxt; 42381da177e4SLinus Torvalds mb(); 42391da177e4SLinus Torvalds tcp_set_state(sk, TCP_ESTABLISHED); 42401da177e4SLinus Torvalds 42416b877699SVenkat Yekkirala security_inet_conn_established(sk, skb); 42426b877699SVenkat Yekkirala 42431da177e4SLinus Torvalds /* Make sure socket is routed, for correct metrics. */ 42448292a17aSArnaldo Carvalho de Melo icsk->icsk_af_ops->rebuild_header(sk); 42451da177e4SLinus Torvalds 42461da177e4SLinus Torvalds tcp_init_metrics(sk); 42471da177e4SLinus Torvalds 42486687e988SArnaldo Carvalho de Melo tcp_init_congestion_control(sk); 4249317a76f9SStephen Hemminger 42501da177e4SLinus Torvalds /* Prevent spurious tcp_cwnd_restart() on first data 42511da177e4SLinus Torvalds * packet. 
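 * (tp->lsndtime is stamped with the current time below; the congestion
 *  window restart logic only kicks in when the time since lsndtime
 *  makes the connection look idle, so the handshake delay is not
 *  mistaken for an idle period.)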
42521da177e4SLinus Torvalds */ 42531da177e4SLinus Torvalds tp->lsndtime = tcp_time_stamp; 42541da177e4SLinus Torvalds 42551da177e4SLinus Torvalds tcp_init_buffer_space(sk); 42561da177e4SLinus Torvalds 42571da177e4SLinus Torvalds if (sock_flag(sk, SOCK_KEEPOPEN)) 4258463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 42591da177e4SLinus Torvalds 42601da177e4SLinus Torvalds if (!tp->rx_opt.snd_wscale) 42611da177e4SLinus Torvalds __tcp_fast_path_on(tp, tp->snd_wnd); 42621da177e4SLinus Torvalds else 42631da177e4SLinus Torvalds tp->pred_flags = 0; 42641da177e4SLinus Torvalds 42651da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) { 42661da177e4SLinus Torvalds sk->sk_state_change(sk); 42671da177e4SLinus Torvalds sk_wake_async(sk, 0, POLL_OUT); 42681da177e4SLinus Torvalds } 42691da177e4SLinus Torvalds 4270295f7324SArnaldo Carvalho de Melo if (sk->sk_write_pending || 4271295f7324SArnaldo Carvalho de Melo icsk->icsk_accept_queue.rskq_defer_accept || 4272295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.pingpong) { 42731da177e4SLinus Torvalds /* Save one ACK. Data will be ready after 42741da177e4SLinus Torvalds * several ticks, if write_pending is set. 42751da177e4SLinus Torvalds * 42761da177e4SLinus Torvalds * It may be deleted, but with this feature tcpdumps 42771da177e4SLinus Torvalds * look so _wonderfully_ clever, that I was not able 42781da177e4SLinus Torvalds * to stand against the temptation 8) --ANK 42791da177e4SLinus Torvalds */ 4280463c84b9SArnaldo Carvalho de Melo inet_csk_schedule_ack(sk); 4281295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.lrcvtime = tcp_time_stamp; 4282295f7324SArnaldo Carvalho de Melo icsk->icsk_ack.ato = TCP_ATO_MIN; 4283463c84b9SArnaldo Carvalho de Melo tcp_incr_quickack(sk); 4284463c84b9SArnaldo Carvalho de Melo tcp_enter_quickack_mode(sk); 42853f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 42863f421baaSArnaldo Carvalho de Melo TCP_DELACK_MAX, TCP_RTO_MAX); 42871da177e4SLinus Torvalds 42881da177e4SLinus Torvalds discard: 42891da177e4SLinus Torvalds __kfree_skb(skb); 42901da177e4SLinus Torvalds return 0; 42911da177e4SLinus Torvalds } else { 42921da177e4SLinus Torvalds tcp_send_ack(sk); 42931da177e4SLinus Torvalds } 42941da177e4SLinus Torvalds return -1; 42951da177e4SLinus Torvalds } 42961da177e4SLinus Torvalds 42971da177e4SLinus Torvalds /* No ACK in the segment */ 42981da177e4SLinus Torvalds 42991da177e4SLinus Torvalds if (th->rst) { 43001da177e4SLinus Torvalds /* rfc793: 43011da177e4SLinus Torvalds * "If the RST bit is set 43021da177e4SLinus Torvalds * 43031da177e4SLinus Torvalds * Otherwise (no ACK) drop the segment and return." 43041da177e4SLinus Torvalds */ 43051da177e4SLinus Torvalds 43061da177e4SLinus Torvalds goto discard_and_undo; 43071da177e4SLinus Torvalds } 43081da177e4SLinus Torvalds 43091da177e4SLinus Torvalds /* PAWS check. */ 43101da177e4SLinus Torvalds if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_check(&tp->rx_opt, 0)) 43111da177e4SLinus Torvalds goto discard_and_undo; 43121da177e4SLinus Torvalds 43131da177e4SLinus Torvalds if (th->syn) { 43141da177e4SLinus Torvalds /* We see SYN without ACK. It is attempt of 43151da177e4SLinus Torvalds * simultaneous connect with crossed SYNs. 43161da177e4SLinus Torvalds * Particularly, it can be connect to self. 
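 * (Both ends are in SYN-SENT in this case. Below we move to SYN-RECV,
 *  record the peer's ISN and options, and reply with a SYN-ACK via
 *  tcp_send_synack().)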
43171da177e4SLinus Torvalds */ 43181da177e4SLinus Torvalds tcp_set_state(sk, TCP_SYN_RECV); 43191da177e4SLinus Torvalds 43201da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp) { 43211da177e4SLinus Torvalds tp->rx_opt.tstamp_ok = 1; 43221da177e4SLinus Torvalds tcp_store_ts_recent(tp); 43231da177e4SLinus Torvalds tp->tcp_header_len = 43241da177e4SLinus Torvalds sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 43251da177e4SLinus Torvalds } else { 43261da177e4SLinus Torvalds tp->tcp_header_len = sizeof(struct tcphdr); 43271da177e4SLinus Torvalds } 43281da177e4SLinus Torvalds 43291da177e4SLinus Torvalds tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 43301da177e4SLinus Torvalds tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 43311da177e4SLinus Torvalds 43321da177e4SLinus Torvalds /* RFC1323: The window in SYN & SYN/ACK segments is 43331da177e4SLinus Torvalds * never scaled. 43341da177e4SLinus Torvalds */ 43351da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window); 43361da177e4SLinus Torvalds tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 43371da177e4SLinus Torvalds tp->max_window = tp->snd_wnd; 43381da177e4SLinus Torvalds 43391da177e4SLinus Torvalds TCP_ECN_rcv_syn(tp, th); 43401da177e4SLinus Torvalds 43415d424d5aSJohn Heffner tcp_mtup_init(sk); 4342d83d8461SArnaldo Carvalho de Melo tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 43431da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 43441da177e4SLinus Torvalds 43451da177e4SLinus Torvalds 43461da177e4SLinus Torvalds tcp_send_synack(sk); 43471da177e4SLinus Torvalds #if 0 43481da177e4SLinus Torvalds /* Note, we could accept data and URG from this segment. 43491da177e4SLinus Torvalds * There are no obstacles to make this. 43501da177e4SLinus Torvalds * 43511da177e4SLinus Torvalds * However, if we ignore data in ACKless segments sometimes, 43521da177e4SLinus Torvalds * we have no reasons to accept it sometimes. 43531da177e4SLinus Torvalds * Also, seems the code doing it in step6 of tcp_rcv_state_process 43541da177e4SLinus Torvalds * is not flawless. So, discard packet for sanity. 43551da177e4SLinus Torvalds * Uncomment this return to process the data. 43561da177e4SLinus Torvalds */ 43571da177e4SLinus Torvalds return -1; 43581da177e4SLinus Torvalds #else 43591da177e4SLinus Torvalds goto discard; 43601da177e4SLinus Torvalds #endif 43611da177e4SLinus Torvalds } 43621da177e4SLinus Torvalds /* "fifth, if neither of the SYN or RST bits is set then 43631da177e4SLinus Torvalds * drop the segment and return." 43641da177e4SLinus Torvalds */ 43651da177e4SLinus Torvalds 43661da177e4SLinus Torvalds discard_and_undo: 43671da177e4SLinus Torvalds tcp_clear_options(&tp->rx_opt); 43681da177e4SLinus Torvalds tp->rx_opt.mss_clamp = saved_clamp; 43691da177e4SLinus Torvalds goto discard; 43701da177e4SLinus Torvalds 43711da177e4SLinus Torvalds reset_and_undo: 43721da177e4SLinus Torvalds tcp_clear_options(&tp->rx_opt); 43731da177e4SLinus Torvalds tp->rx_opt.mss_clamp = saved_clamp; 43741da177e4SLinus Torvalds return 1; 43751da177e4SLinus Torvalds } 43761da177e4SLinus Torvalds 43771da177e4SLinus Torvalds 43781da177e4SLinus Torvalds /* 43791da177e4SLinus Torvalds * This function implements the receiving procedure of RFC 793 for 43801da177e4SLinus Torvalds * all states except ESTABLISHED and TIME_WAIT. 43811da177e4SLinus Torvalds * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 43821da177e4SLinus Torvalds * address independent. 
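 *
 * Returns zero when the segment has been consumed here (queued or
 * freed); a non-zero return tells the caller to reply with a reset
 * and drop the segment.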
43831da177e4SLinus Torvalds */ 43841da177e4SLinus Torvalds 43851da177e4SLinus Torvalds int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 43861da177e4SLinus Torvalds struct tcphdr *th, unsigned len) 43871da177e4SLinus Torvalds { 43881da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 43898292a17aSArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 43901da177e4SLinus Torvalds int queued = 0; 43911da177e4SLinus Torvalds 43921da177e4SLinus Torvalds tp->rx_opt.saw_tstamp = 0; 43931da177e4SLinus Torvalds 43941da177e4SLinus Torvalds switch (sk->sk_state) { 43951da177e4SLinus Torvalds case TCP_CLOSE: 43961da177e4SLinus Torvalds goto discard; 43971da177e4SLinus Torvalds 43981da177e4SLinus Torvalds case TCP_LISTEN: 43991da177e4SLinus Torvalds if(th->ack) 44001da177e4SLinus Torvalds return 1; 44011da177e4SLinus Torvalds 44021da177e4SLinus Torvalds if(th->rst) 44031da177e4SLinus Torvalds goto discard; 44041da177e4SLinus Torvalds 44051da177e4SLinus Torvalds if(th->syn) { 44068292a17aSArnaldo Carvalho de Melo if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 44071da177e4SLinus Torvalds return 1; 44081da177e4SLinus Torvalds 44091da177e4SLinus Torvalds /* Now we have several options: In theory there is 44101da177e4SLinus Torvalds * nothing else in the frame. KA9Q has an option to 44111da177e4SLinus Torvalds * send data with the syn, BSD accepts data with the 44121da177e4SLinus Torvalds * syn up to the [to be] advertised window and 44131da177e4SLinus Torvalds * Solaris 2.1 gives you a protocol error. For now 44141da177e4SLinus Torvalds * we just ignore it, that fits the spec precisely 44151da177e4SLinus Torvalds * and avoids incompatibilities. It would be nice in 44161da177e4SLinus Torvalds * future to drop through and process the data. 44171da177e4SLinus Torvalds * 44181da177e4SLinus Torvalds * Now that TTCP is starting to be used we ought to 44191da177e4SLinus Torvalds * queue this data. 44201da177e4SLinus Torvalds * But, this leaves one open to an easy denial of 44211da177e4SLinus Torvalds * service attack, and SYN cookies can't defend 44221da177e4SLinus Torvalds * against this problem. So, we drop the data 44231da177e4SLinus Torvalds * in the interest of security over speed. 44241da177e4SLinus Torvalds */ 44251da177e4SLinus Torvalds goto discard; 44261da177e4SLinus Torvalds } 44271da177e4SLinus Torvalds goto discard; 44281da177e4SLinus Torvalds 44291da177e4SLinus Torvalds case TCP_SYN_SENT: 44301da177e4SLinus Torvalds queued = tcp_rcv_synsent_state_process(sk, skb, th, len); 44311da177e4SLinus Torvalds if (queued >= 0) 44321da177e4SLinus Torvalds return queued; 44331da177e4SLinus Torvalds 44341da177e4SLinus Torvalds /* Do step6 onward by hand. */ 44351da177e4SLinus Torvalds tcp_urg(sk, skb, th); 44361da177e4SLinus Torvalds __kfree_skb(skb); 443755c97f3eSDavid S. Miller tcp_data_snd_check(sk, tp); 44381da177e4SLinus Torvalds return 0; 44391da177e4SLinus Torvalds } 44401da177e4SLinus Torvalds 44411da177e4SLinus Torvalds if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4442463c84b9SArnaldo Carvalho de Melo tcp_paws_discard(sk, skb)) { 44431da177e4SLinus Torvalds if (!th->rst) { 44441da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 44451da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 44461da177e4SLinus Torvalds goto discard; 44471da177e4SLinus Torvalds } 44481da177e4SLinus Torvalds /* Reset is accepted even if it did not pass PAWS. 
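 * Such an RST is not dropped here; it still has to pass the step 1
 * sequence check below, and only then does step 2 call tcp_reset().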
*/ 44491da177e4SLinus Torvalds } 44501da177e4SLinus Torvalds 44511da177e4SLinus Torvalds /* step 1: check sequence number */ 44521da177e4SLinus Torvalds if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 44531da177e4SLinus Torvalds if (!th->rst) 44541da177e4SLinus Torvalds tcp_send_dupack(sk, skb); 44551da177e4SLinus Torvalds goto discard; 44561da177e4SLinus Torvalds } 44571da177e4SLinus Torvalds 44581da177e4SLinus Torvalds /* step 2: check RST bit */ 44591da177e4SLinus Torvalds if(th->rst) { 44601da177e4SLinus Torvalds tcp_reset(sk); 44611da177e4SLinus Torvalds goto discard; 44621da177e4SLinus Torvalds } 44631da177e4SLinus Torvalds 44641da177e4SLinus Torvalds tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 44651da177e4SLinus Torvalds 44661da177e4SLinus Torvalds /* step 3: check security and precedence [ignored] */ 44671da177e4SLinus Torvalds 44681da177e4SLinus Torvalds /* step 4: 44691da177e4SLinus Torvalds * 44701da177e4SLinus Torvalds * Check for a SYN in window. 44711da177e4SLinus Torvalds */ 44721da177e4SLinus Torvalds if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 44731da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); 44741da177e4SLinus Torvalds tcp_reset(sk); 44751da177e4SLinus Torvalds return 1; 44761da177e4SLinus Torvalds } 44771da177e4SLinus Torvalds 44781da177e4SLinus Torvalds /* step 5: check the ACK field */ 44791da177e4SLinus Torvalds if (th->ack) { 44801da177e4SLinus Torvalds int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH); 44811da177e4SLinus Torvalds 44821da177e4SLinus Torvalds switch(sk->sk_state) { 44831da177e4SLinus Torvalds case TCP_SYN_RECV: 44841da177e4SLinus Torvalds if (acceptable) { 44851da177e4SLinus Torvalds tp->copied_seq = tp->rcv_nxt; 44861da177e4SLinus Torvalds mb(); 44871da177e4SLinus Torvalds tcp_set_state(sk, TCP_ESTABLISHED); 44881da177e4SLinus Torvalds sk->sk_state_change(sk); 44891da177e4SLinus Torvalds 44901da177e4SLinus Torvalds /* Note, that this wakeup is only for marginal 44911da177e4SLinus Torvalds * crossed SYN case. Passively open sockets 44921da177e4SLinus Torvalds * are not waked up, because sk->sk_sleep == 44931da177e4SLinus Torvalds * NULL and sk->sk_socket == NULL. 44941da177e4SLinus Torvalds */ 44951da177e4SLinus Torvalds if (sk->sk_socket) { 44961da177e4SLinus Torvalds sk_wake_async(sk,0,POLL_OUT); 44971da177e4SLinus Torvalds } 44981da177e4SLinus Torvalds 44991da177e4SLinus Torvalds tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 45001da177e4SLinus Torvalds tp->snd_wnd = ntohs(th->window) << 45011da177e4SLinus Torvalds tp->rx_opt.snd_wscale; 45021da177e4SLinus Torvalds tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, 45031da177e4SLinus Torvalds TCP_SKB_CB(skb)->seq); 45041da177e4SLinus Torvalds 45051da177e4SLinus Torvalds /* tcp_ack considers this ACK as duplicate 45061da177e4SLinus Torvalds * and does not calculate rtt. 45071da177e4SLinus Torvalds * Fix it at least with timestamps. 45081da177e4SLinus Torvalds */ 45091da177e4SLinus Torvalds if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 45101da177e4SLinus Torvalds !tp->srtt) 45112d2abbabSStephen Hemminger tcp_ack_saw_tstamp(sk, 0); 45121da177e4SLinus Torvalds 45131da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 45141da177e4SLinus Torvalds tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 45151da177e4SLinus Torvalds 45161da177e4SLinus Torvalds /* Make sure socket is routed, for 45171da177e4SLinus Torvalds * correct metrics. 
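 * (rebuild_header() revalidates the route to the peer, so that
 *  tcp_init_metrics() below picks up RTT and ssthresh/cwnd hints
 *  from the correct destination cache entry.)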
45181da177e4SLinus Torvalds */ 45198292a17aSArnaldo Carvalho de Melo icsk->icsk_af_ops->rebuild_header(sk); 45201da177e4SLinus Torvalds 45211da177e4SLinus Torvalds tcp_init_metrics(sk); 45221da177e4SLinus Torvalds 45236687e988SArnaldo Carvalho de Melo tcp_init_congestion_control(sk); 4524317a76f9SStephen Hemminger 45251da177e4SLinus Torvalds /* Prevent spurious tcp_cwnd_restart() on 45261da177e4SLinus Torvalds * first data packet. 45271da177e4SLinus Torvalds */ 45281da177e4SLinus Torvalds tp->lsndtime = tcp_time_stamp; 45291da177e4SLinus Torvalds 45305d424d5aSJohn Heffner tcp_mtup_init(sk); 45311da177e4SLinus Torvalds tcp_initialize_rcv_mss(sk); 45321da177e4SLinus Torvalds tcp_init_buffer_space(sk); 45331da177e4SLinus Torvalds tcp_fast_path_on(tp); 45341da177e4SLinus Torvalds } else { 45351da177e4SLinus Torvalds return 1; 45361da177e4SLinus Torvalds } 45371da177e4SLinus Torvalds break; 45381da177e4SLinus Torvalds 45391da177e4SLinus Torvalds case TCP_FIN_WAIT1: 45401da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 45411da177e4SLinus Torvalds tcp_set_state(sk, TCP_FIN_WAIT2); 45421da177e4SLinus Torvalds sk->sk_shutdown |= SEND_SHUTDOWN; 45431da177e4SLinus Torvalds dst_confirm(sk->sk_dst_cache); 45441da177e4SLinus Torvalds 45451da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_DEAD)) 45461da177e4SLinus Torvalds /* Wake up lingering close() */ 45471da177e4SLinus Torvalds sk->sk_state_change(sk); 45481da177e4SLinus Torvalds else { 45491da177e4SLinus Torvalds int tmo; 45501da177e4SLinus Torvalds 45511da177e4SLinus Torvalds if (tp->linger2 < 0 || 45521da177e4SLinus Torvalds (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 45531da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 45541da177e4SLinus Torvalds tcp_done(sk); 45551da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 45561da177e4SLinus Torvalds return 1; 45571da177e4SLinus Torvalds } 45581da177e4SLinus Torvalds 4559463c84b9SArnaldo Carvalho de Melo tmo = tcp_fin_time(sk); 45601da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 4561463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 45621da177e4SLinus Torvalds } else if (th->fin || sock_owned_by_user(sk)) { 45631da177e4SLinus Torvalds /* Bad case. We could lose such FIN otherwise. 45641da177e4SLinus Torvalds * It is not a big problem, but it looks confusing 45651da177e4SLinus Torvalds * and not so rare event. We still can lose it now, 45661da177e4SLinus Torvalds * if it spins in bh_lock_sock(), but it is really 45671da177e4SLinus Torvalds * marginal case. 
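 * Rather than converting to a timewait socket right away, the
 * keepalive timer is reused below as the FIN_WAIT2 timeout, keeping
 * the full socket around so a FIN already in flight can still be
 * handled.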
45681da177e4SLinus Torvalds */ 4569463c84b9SArnaldo Carvalho de Melo inet_csk_reset_keepalive_timer(sk, tmo); 45701da177e4SLinus Torvalds } else { 45711da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 45721da177e4SLinus Torvalds goto discard; 45731da177e4SLinus Torvalds } 45741da177e4SLinus Torvalds } 45751da177e4SLinus Torvalds } 45761da177e4SLinus Torvalds break; 45771da177e4SLinus Torvalds 45781da177e4SLinus Torvalds case TCP_CLOSING: 45791da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 45801da177e4SLinus Torvalds tcp_time_wait(sk, TCP_TIME_WAIT, 0); 45811da177e4SLinus Torvalds goto discard; 45821da177e4SLinus Torvalds } 45831da177e4SLinus Torvalds break; 45841da177e4SLinus Torvalds 45851da177e4SLinus Torvalds case TCP_LAST_ACK: 45861da177e4SLinus Torvalds if (tp->snd_una == tp->write_seq) { 45871da177e4SLinus Torvalds tcp_update_metrics(sk); 45881da177e4SLinus Torvalds tcp_done(sk); 45891da177e4SLinus Torvalds goto discard; 45901da177e4SLinus Torvalds } 45911da177e4SLinus Torvalds break; 45921da177e4SLinus Torvalds } 45931da177e4SLinus Torvalds } else 45941da177e4SLinus Torvalds goto discard; 45951da177e4SLinus Torvalds 45961da177e4SLinus Torvalds /* step 6: check the URG bit */ 45971da177e4SLinus Torvalds tcp_urg(sk, skb, th); 45981da177e4SLinus Torvalds 45991da177e4SLinus Torvalds /* step 7: process the segment text */ 46001da177e4SLinus Torvalds switch (sk->sk_state) { 46011da177e4SLinus Torvalds case TCP_CLOSE_WAIT: 46021da177e4SLinus Torvalds case TCP_CLOSING: 46031da177e4SLinus Torvalds case TCP_LAST_ACK: 46041da177e4SLinus Torvalds if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 46051da177e4SLinus Torvalds break; 46061da177e4SLinus Torvalds case TCP_FIN_WAIT1: 46071da177e4SLinus Torvalds case TCP_FIN_WAIT2: 46081da177e4SLinus Torvalds /* RFC 793 says to queue data in these states, 46091da177e4SLinus Torvalds * RFC 1122 says we MUST send a reset. 46101da177e4SLinus Torvalds * BSD 4.4 also does reset. 46111da177e4SLinus Torvalds */ 46121da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) { 46131da177e4SLinus Torvalds if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 46141da177e4SLinus Torvalds after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 46151da177e4SLinus Torvalds NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA); 46161da177e4SLinus Torvalds tcp_reset(sk); 46171da177e4SLinus Torvalds return 1; 46181da177e4SLinus Torvalds } 46191da177e4SLinus Torvalds } 46201da177e4SLinus Torvalds /* Fall through */ 46211da177e4SLinus Torvalds case TCP_ESTABLISHED: 46221da177e4SLinus Torvalds tcp_data_queue(sk, skb); 46231da177e4SLinus Torvalds queued = 1; 46241da177e4SLinus Torvalds break; 46251da177e4SLinus Torvalds } 46261da177e4SLinus Torvalds 46271da177e4SLinus Torvalds /* tcp_data could move socket to TIME-WAIT */ 46281da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 462955c97f3eSDavid S. 
Miller tcp_data_snd_check(sk, tp); 46301da177e4SLinus Torvalds tcp_ack_snd_check(sk); 46311da177e4SLinus Torvalds } 46321da177e4SLinus Torvalds 46331da177e4SLinus Torvalds if (!queued) { 46341da177e4SLinus Torvalds discard: 46351da177e4SLinus Torvalds __kfree_skb(skb); 46361da177e4SLinus Torvalds } 46371da177e4SLinus Torvalds return 0; 46381da177e4SLinus Torvalds } 46391da177e4SLinus Torvalds 46401da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_ecn); 46411da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_reordering); 46421da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_parse_options); 46431da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_rcv_established); 46441da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_rcv_state_process); 464540efc6faSStephen Hemminger EXPORT_SYMBOL(tcp_initialize_rcv_mss);
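/* Rough usage sketch (illustrative only, not the exact upstream caller):
 * the v4/v6 receive paths hand segments for sockets that are neither
 * ESTABLISHED nor TIME_WAIT to tcp_rcv_state_process() and send a reset
 * when it returns non-zero. example_do_rcv() is a made-up name; the real
 * callers are tcp_v4_do_rcv()/tcp_v6_do_rcv(). Kept under #if 0 so it is
 * never built.
 */
#if 0
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		return 1;	/* real callers send a RST and free the skb here */
	return 0;		/* segment consumed (queued or freed) */
}
#endif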