/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_thin_dupack __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
int sysctl_tcp_early_retrans __read_mostly = 2;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
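
/* A rough guide to how the combinations above are used: an incoming ACK
 * that sets none of the FLAG_NOT_DUP bits carried no data, no window
 * update and acknowledged nothing new -- the classic RFC 5681
 * duplicate-ACK test, expressed in flag form by the ACK processing code.
 */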

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that the size of the SACK block is variable.
		 *
		 * "len" is the invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, the packet should be
		     * full sized, provided the peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows us
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Also subtract the invariant part (if the peer is
			 * RFC compliant): TCP header plus fixed timestamp
			 * option length.
			 * The resulting "len" is the MSS, free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline bool tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}
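
/* For reference: the "quick" budget computed by tcp_incr_quickack() above
 * is rcv_wnd / (2 * rcv_mss), i.e. roughly enough immediate ACKs to cover
 * one full receive window at one ACK per two segments, clamped to
 * TCP_MAX_QUICKACKS and given a floor of two.
 */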

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (!(tp->ecn_flags & TCP_ECN_OK))
		return;

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

	sndmem *= TCP_INIT_CWND;
	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the network's viewpoint, but the lower
 * the throughput and the higher the connection's sensitivity to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}
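
/* An illustrative data point, assuming the default
 * sysctl_tcp_adv_win_scale of 1 (tcp_win_from_space() then yields half
 * of the given space): the fast path in tcp_grow_window() below accepts
 * an skb whose payload fills at least half of its truesize; anything
 * more bloated takes the halving walk above instead.
 */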

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !sk_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if an skb with such overhead
		 * will fit into rcvbuf in the future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
	int rcvmem;

	/* Limit to 10 segments if mss <= 1460,
	 * or 14600/mss segments, with a minimum of two segments.
	 */
	if (mss > 1460)
		icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);

	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
	while (tcp_win_from_space(rcvmem) < mss)
		rcvmem += 128;

	rcvmem *= icwnd;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}
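
/* A worked example of the sizing above: for mss = 1460 the initial
 * receive window stays at TCP_DEFAULT_INIT_RCVWND (10) segments, while
 * for a jumbo mss of, say, 9000 it becomes max(14600 / 9000, 2) = 2
 * segments, per the "14600/mss, minimum of two" rule in the comment.
 */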

/* 4. Try to fix up everything. This is done immediately after the
 * connection enters the established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate it.
 * Overestimating it makes us ACK less frequently than needed.
 * Underestimates are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date. A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out,
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}
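
/* Implementation note on tcp_rcv_rtt_update() above: rcv_rtt_est.rtt is
 * kept left-shifted by 3, the same fixed-point convention as srtt. The
 * win_dep path (one sample per receive window, no timestamps) is a pure
 * minimum filter, while the timestamp path (win_dep == 0) is an EWMA
 * with gain 1/8, mirroring the sender-side srtt update.
 */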

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);

	if (tp->rcvq_space.space != space) {
		int rcvmem;

		tp->rcvq_space.space = space;

		if (sysctl_tcp_moderate_rcvbuf &&
		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			int new_clamp = space;

			/* Receive space grows, normalize in order to
			 * take into account packet headers and sk_buff
			 * structure overhead.
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along. */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCPs do slow start at the beginning of data
 * transmission. This means that until we send the first few ACKs, the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long a gap. Apparently the sender failed to
			 * restart the window, so we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}
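
/* The inter-arrival bookkeeping above makes ato behave like a clamped
 * EWMA of the packet spacing m: dense traffic (m <= TCP_ATO_MIN/2)
 * pulls it down toward TCP_ATO_MIN, moderate spacing folds m in via
 * ato = ato/2 + m (capped at icsk_rto), and a gap larger than the RTO
 * re-enters quickack mode instead of stretching the delayed-ACK
 * timeout any further.
 */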

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible.
	 * m stands for "measurement".
	 *
	 * In a 1990 paper the RTO value was changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so it absolutely
	 * does not matter how to _calculate_ it. Seems it was a trap
	 * that VJ failed to avoid. 8)
	 */
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);	/* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);	/* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}

/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    the whole algorithm is pure shit and should be replaced
	 *    with a correct one, which is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}
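
/* Fixed-point conventions used above, for reference: srtt is stored
 * left-shifted by 3 (8 * RTT) and mdev/rttvar left-shifted by 2
 * (4 * mean deviation), so __tcp_set_rto() amounts to
 * (srtt >> 3) + rttvar, the classic RTO = RTT + 4 * mdev in unscaled
 * terms.
 */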

/* Save metrics learned by this TCP session.
   This function is called only when TCP finishes successfully,
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_nometrics_save)
		return;

	dst_confirm(dst);

	if (dst && (dst->flags & DST_HOST)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		int m;
		unsigned long rtt;

		if (icsk->icsk_backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably no packets returned in time.
			 * Reset our results.
			 */
			if (!(dst_metric_locked(dst, RTAX_RTT)))
				dst_metric_set(dst, RTAX_RTT, 0);
			return;
		}

		rtt = dst_metric_rtt(dst, RTAX_RTT);
		m = rtt - tp->srtt;

		/* If the newly calculated rtt is larger than the stored one,
		 * store the new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
			else
				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			unsigned long var;
			if (m < 0)
				m = -m;

			/* Scale deviation to rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			var = dst_metric_rtt(dst, RTAX_RTTVAR);
			if (m >= var)
				var = m;
			else
				var -= (var - m) >> 2;

			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
		}

		if (tcp_in_initial_slowstart(tp)) {
			/* Slow start still did not finish. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH,
					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_cwnd) >> 1);
		} else {
			/* Otherwise slow start did not finish, cwnd is
			   nonsense, and ssthresh may be invalid as well.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_ssthresh) >> 1);
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
		}
	}
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Set slow start threshold and cwnd, without falling into slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected.
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses a different metric in the lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}
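
/* Note that rx_opt.sack_ok doubles as a small bit field: besides
 * recording that SACK was negotiated at all, the TCP_FACK_ENABLED and
 * TCP_DSACK_SEEN bits toggled above track which SACK variants are
 * currently in play on this connection.
 */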

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than the real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances sending
	 * small packets forces the peer to delay ACKs, so the calculation
	 * is correct there too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If the peer plays clever tricks,
	 * sending "quick acks" for long enough to drive the RTT estimate
	 * down, and then abruptly stops and starts delaying ACKs, expect
	 * trouble.
	 */
	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including spurious ones. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth remembering. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 tp->fackets_out,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	if (metric > 0)
		tcp_disable_early_retrans(tp);
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be
 * commutative, so that we are allowed not to be bothered by the order
 * of our actions when multiple events arrive simultaneously. (see the
 * function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance a packet can be
 * displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills an old hole and the corresponding segment was never
 *    retransmitted -> reordering. Alas, we cannot use it
 *    when the segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. A D-SACK arrives
 *    for a retransmitted and already SACKed segment -> reordering.
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range though being valid because
 * it means that the receiver is rather inconsistent with itself, reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is,
 * however, perfectly valid in light of RFC2018, which explicitly states
 * that "SACK block MUST reflect the newest segment.  Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in the case of the head skb. Due to potential receiver-driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * This also implements protection against start_seq wrap-around. The
 * problem lies in the fact that though start_seq (s) is before end_seq
 * (i.e., not reversed), there's no guarantee that it will be before
 * snd_nxt (n). The problem happens when start_seq resides between
 * end_seq wrap (e_w) and snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * The current code wouldn't be vulnerable, but it's still better to discard
 * such crazy SACK blocks. Doing this check for start_seq alone closes a
 * somewhat similar case (end_seq after snd_nxt wrap), as the earlier
 * reversed check in the snd_nxt wrap -> snd_una region will then become
 * "well defined", i.e., equal to the ideal case (infinite seqno space
 * without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover the sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But that is where all
 * simplicity ends: TCP might receive valid D-SACKs below that. As long as
 * they reside fully below undo_marker they do not affect behavior in any
 * way and can therefore be safely ignored. In rare cases (which are more or
 * less theoretical ones), the D-SACK will nicely cross that boundary due to
 * skb fragmentation and packet reordering past skb's retransmission. To
 * consider them correctly, the acceptable range must be extended even more,
 * though the exact amount is rather hard to quantify. However, tp->max_window
 * can be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is nonsensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return true;

	if (!is_dsack || !tp->undo_marker)
		return false;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return false;

	if (!before(start_seq, tp->undo_marker))
		return true;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return false;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 * start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}
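
/* A worked example of the ordering above, with made-up numbers:
 * snd_una = 1000, snd_nxt = 5000, undo_marker = 800. A plain SACK of
 * [2000,3000) is accepted by the "in outstanding window" test; a plain
 * block of [900,950) is rejected, since only D-SACKs may sit below
 * snd_una; a D-SACK of [850,900) is accepted because it lies fully
 * below snd_una and does not start before undo_marker.
 */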

/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
 * Event "B". Later note: FACK people cheated me again 8), we have to account
 * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
 * less than what is now known to be received by the other end (derived from
 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
 * retransmitted skbs to avoid some costly processing per ACK.
 */
static void tcp_mark_lost_retrans(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;
	u32 new_low_seq = tp->snd_nxt;
	u32 received_upto = tcp_highest_sack_seq(tp);

	if (!tcp_is_fack(tp) || !tp->retrans_out ||
	    !after(received_upto, tp->lost_retrans_low) ||
	    icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	tcp_for_write_queue(skb, sk) {
		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;

		if (skb == tcp_send_head(sk))
			break;
		if (cnt == tp->retrans_out)
			break;
		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			continue;

		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
			continue;

		/* TODO: We would like to get rid of the tcp_is_fack(tp)-only
		 * constraint here (see above) but figuring out that at
		 * least tp->reordering SACK blocks reside between ack_seq
		 * and received_upto is not an easy task to do cheaply with
		 * the available data structures.
		 *
		 * Whether FACK should check here for tp->reordering segs
		 * in-between one could argue either way (it would be
		 * rather simple to implement as we could count fack_count
		 * during the walk and do tp->fackets_out - fack_count).
		 */
		if (after(received_upto, ack_seq)) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);

			tcp_skb_mark_lost_uncond_verify(tp, skb);
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
		} else {
			if (before(ack_seq, new_low_seq))
				new_low_seq = ack_seq;
			cnt += tcp_skb_pcount(skb);
		}
	}

	if (tp->retrans_out)
		tp->lost_retrans_low = new_low_seq;
}

static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			    struct tcp_sack_block_wire *sp, int num_sacks,
			    u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	bool dup_sack = false;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = true;
		tcp_dsack_seen(tp);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = true;
			tcp_dsack_seen(tp);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}
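
/* The two branches above implement the RFC 2883 receiver conventions
 * for signalling duplicate data: either the first SACK block falls
 * below the cumulative ACK, or the first block is entirely contained
 * within the second one. Either pattern marks the ACK as carrying a
 * D-SACK.
 */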

struct tcp_sacktag_state {
	int reord;
	int fack_count;
	int flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment,
 * which may fail and creates some hassle (the caller must handle the
 * error-case returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int err;
	bool in_sack;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len > skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss);
		if (err < 0)
			return err;
	}

	return in_sack;
}
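
/* A rounding sketch for the function above: a partially SACKed GSO skb
 * is split on an MSS boundary, so with mss = 1000 and the SACK edge
 * 2500 bytes into the skb, the un-SACKed head is rounded up from 2500
 * to 3000 bytes before fragmenting, keeping every resulting fragment a
 * whole number of MSS-sized segments (plus any small tail).
 */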

/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  bool dup_sack, int pcount)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New SACK for a frame that was never
				 * retransmitted and was in a hole.
				 * This is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);

				/* SACK enhanced F-RTO (RFC4138; Appendix B) */
				if (!after(end_seq, tp->frto_highmark))
					state->flag |= FLAG_ONLY_ORIG_SACKED;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear the bit. undo_retrans was decreased above; L|R
	 * frames are accounted for above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			    struct tcp_sacktag_state *state,
			    unsigned int pcount, int shifted, int mss,
			    bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	skb_shinfo(prev)->gso_segs += pcount;
	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
	skb_shinfo(skb)->gso_segs -= pcount;

	/* When we're adding to gso_segs == 1, gso_size will be zero.
	 * In theory this shouldn't be necessary, but as long as DSACK
	 * code can process this skb later on, it's better to keep
	 * gso_size set to something.
	 */
	if (!skb_shinfo(prev)->gso_size) {
		skb_shinfo(prev)->gso_size = mss;
		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
	}

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (skb_shinfo(skb)->gso_segs <= 1) {
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return false;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->scoreboard_skb_hint)
		tp->scoreboard_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);

	return true;
}

/* I wish gso_size had a saner initialization than
 * something-or-zero, which complicates things.
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning multiple skbs into a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;

	/* Can only happen with delayed DSACK + discard craziness */
	if (unlikely(skb == tcp_write_queue_head(sk)))
		goto fallback;
	prev = tcp_write_queue_prev(sk, skb);

	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is the non-MSS split case only? This will
		 * cause skipped skbs due to the advancing loop, btw; the
		 * original has that feature too.
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to pay off
			 * because later SACKs might split them again, and
			 * it would make skb timestamp tracking a considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured, or else pcount will
		 * severely break, even though it makes things a bit trickier.
		 * Optimize the common case to avoid most of the divides.
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* A filled hole allows collapsing with the next skb as well; this
	 * is very useful when a hole-on-every-nth-skb pattern happens.
	 */
	if (prev == tcp_write_queue_tail(sk))
		goto out;
	skb = tcp_write_queue_next(sk, prev);

	if (!skb_can_shift(skb) ||
	    (skb == tcp_send_head(sk)) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
	}

out:
	state->fack_count += pcount;
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}
1673 */ 1674 if (in_sack <= 0) { 1675 tmp = tcp_shift_skb_data(sk, skb, state, 1676 start_seq, end_seq, dup_sack); 1677 if (tmp != NULL) { 1678 if (tmp != skb) { 1679 skb = tmp; 1680 continue; 1681 } 1682 1683 in_sack = 0; 1684 } else { 1685 in_sack = tcp_match_skb_to_sack(sk, skb, 1686 start_seq, 1687 end_seq); 1688 } 1689 } 1690 1691 if (unlikely(in_sack < 0)) 1692 break; 1693 1694 if (in_sack) { 1695 TCP_SKB_CB(skb)->sacked = 1696 tcp_sacktag_one(sk, 1697 state, 1698 TCP_SKB_CB(skb)->sacked, 1699 TCP_SKB_CB(skb)->seq, 1700 TCP_SKB_CB(skb)->end_seq, 1701 dup_sack, 1702 tcp_skb_pcount(skb)); 1703 1704 if (!before(TCP_SKB_CB(skb)->seq, 1705 tcp_highest_sack_seq(tp))) 1706 tcp_advance_highest_sack(sk, skb); 1707 } 1708 1709 state->fack_count += tcp_skb_pcount(skb); 1710 } 1711 return skb; 1712 } 1713 1714 /* Avoid all extra work that is being done by sacktag while walking in 1715 * a normal way 1716 */ 1717 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1718 struct tcp_sacktag_state *state, 1719 u32 skip_to_seq) 1720 { 1721 tcp_for_write_queue_from(skb, sk) { 1722 if (skb == tcp_send_head(sk)) 1723 break; 1724 1725 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 1726 break; 1727 1728 state->fack_count += tcp_skb_pcount(skb); 1729 } 1730 return skb; 1731 } 1732 1733 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1734 struct sock *sk, 1735 struct tcp_sack_block *next_dup, 1736 struct tcp_sacktag_state *state, 1737 u32 skip_to_seq) 1738 { 1739 if (next_dup == NULL) 1740 return skb; 1741 1742 if (before(next_dup->start_seq, skip_to_seq)) { 1743 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); 1744 skb = tcp_sacktag_walk(skb, sk, NULL, state, 1745 next_dup->start_seq, next_dup->end_seq, 1746 1); 1747 } 1748 1749 return skb; 1750 } 1751 1752 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1753 { 1754 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1755 } 1756 1757 static int 1758 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1759 u32 prior_snd_una) 1760 { 1761 const struct inet_connection_sock *icsk = inet_csk(sk); 1762 struct tcp_sock *tp = tcp_sk(sk); 1763 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1764 TCP_SKB_CB(ack_skb)->sacked); 1765 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1766 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1767 struct tcp_sack_block *cache; 1768 struct tcp_sacktag_state state; 1769 struct sk_buff *skb; 1770 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1771 int used_sacks; 1772 bool found_dup_sack = false; 1773 int i, j; 1774 int first_sack_index; 1775 1776 state.flag = 0; 1777 state.reord = tp->packets_out; 1778 1779 if (!tp->sacked_out) { 1780 if (WARN_ON(tp->fackets_out)) 1781 tp->fackets_out = 0; 1782 tcp_highest_sack_reset(sk); 1783 } 1784 1785 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1786 num_sacks, prior_snd_una); 1787 if (found_dup_sack) 1788 state.flag |= FLAG_DSACKING_ACK; 1789 1790 /* Eliminate too old ACKs, but take into 1791 * account more or less fresh ones, they can 1792 * contain valid SACK info. 
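	 * (Illustrative, editor's sketch: with max_window = 64K, an ACK
	 * whose ack_seq lags prior_snd_una by more than 64K is dropped by
	 * the check below without looking at its SACK blocks at all.)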
	 */
	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
		return 0;

	if (!tp->packets_out)
		goto out;

	used_sacks = 0;
	first_sack_index = 0;
	for (i = 0; i < num_sacks; i++) {
		bool dup_sack = !i && found_dup_sack;

		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);

		if (!tcp_is_sackblock_valid(tp, dup_sack,
					    sp[used_sacks].start_seq,
					    sp[used_sacks].end_seq)) {
			int mib_idx;

			if (dup_sack) {
				if (!tp->undo_marker)
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
				else
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
			} else {
				/* Don't count olds caused by ACK reordering */
				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
				    !after(sp[used_sacks].end_seq, tp->snd_una))
					continue;
				mib_idx = LINUX_MIB_TCPSACKDISCARD;
			}

			NET_INC_STATS_BH(sock_net(sk), mib_idx);
			if (i == 0)
				first_sack_index = -1;
			continue;
		}

		/* Ignore very old stuff early */
		if (!after(sp[used_sacks].end_seq, prior_snd_una))
			continue;

		used_sacks++;
	}

	/* Order SACK blocks to allow an in-order walk of the retrans queue */
	for (i = used_sacks - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
				swap(sp[j], sp[j + 1]);

				/* Track where the first SACK block goes to */
				if (j == first_sack_index)
					first_sack_index = j + 1;
			}
		}
	}

	skb = tcp_write_queue_head(sk);
	state.fack_count = 0;
	i = 0;

	if (!tp->sacked_out) {
		/* It's already past, so skip checking against it */
		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
	} else {
		cache = tp->recv_sack_cache;
		/* Skip empty blocks at the head of the cache */
		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
		       !cache->end_seq)
			cache++;
	}

	while (i < used_sacks) {
		u32 start_seq = sp[i].start_seq;
		u32 end_seq = sp[i].end_seq;
		bool dup_sack = (found_dup_sack && (i == first_sack_index));
		struct tcp_sack_block *next_dup = NULL;

		if (found_dup_sack && ((i + 1) == first_sack_index))
			next_dup = &sp[i + 1];

		/* Skip too early cached blocks */
		while (tcp_sack_cache_ok(tp, cache) &&
		       !before(start_seq, cache->end_seq))
			cache++;

		/* Can we skip some work by looking at recv_sack_cache? */
		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
		    after(end_seq, cache->start_seq)) {

			/* Head todo? */
			if (before(start_seq, cache->start_seq)) {
				skb = tcp_sacktag_skip(skb, sk, &state,
						       start_seq);
				skb = tcp_sacktag_walk(skb, sk, next_dup,
						       &state,
						       start_seq,
						       cache->start_seq,
						       dup_sack);
			}

			/* Rest of the block already fully processed? */
			if (!after(end_seq, cache->end_seq))
				goto advance_sp;

			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
						       &state,
						       cache->end_seq);

			/* ...tail remains todo... */
			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
				/* ...but a better entrypoint exists!
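				 * (tcp_highest_sack() below returns the skb
				 * at the highest SACK so far, so the walk can
				 * resume there instead of skipping forward
				 * from wherever skb currently points; an
				 * editor's note, matching the fack_count
				 * reset that follows.)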
				 */
				skb = tcp_highest_sack(sk);
				if (skb == NULL)
					break;
				state.fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (skb == NULL)
				break;
			state.fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
		 * due to in-order walk
		 */
		if (after(end_seq, tp->frto_highmark))
			state.flag &= ~FLAG_ONLY_ORIG_SACKED;

		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	tcp_mark_lost_retrans(sk);

	tcp_verify_left_out(tp);

	if ((state.reord < tp->fackets_out) &&
	    ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);

out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state.flag;
}

/* Limits sacked_out so that the sum with lost_out isn't ever larger than
 * packets_out. Returns false if a sacked_out adjustment wasn't necessary.
 */
static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
	u32 holes;

	holes = max(tp->lost_out, 1U);
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return true;
	}
	return false;
}

/* If we receive more dupacks than we expected while counting segments
 * under the assumption of no reordering, interpret this as reordering.
 * The only other explanation would be a bug in the receiver's TCP.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tcp_limit_reno_sacked(tp))
		tcp_update_reordering(sk, tp->packets_out + addend, 0);
}

/* Emulate SACKs for a SACKless connection: account for a new dupack. */

static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	tp->sacked_out++;
	tcp_check_reno_reordering(sk, 0);
	tcp_verify_left_out(tp);
}

/* Account for an ACK, ACKing some data in the Reno Recovery phase. */

static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (acked > 0) {
		/* One ACK acked a hole. The rest eat duplicate ACKs.
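		 * (Worked example, editor's sketch of the arithmetic below:
		 *
		 *	acked = 3, sacked_out = 5
		 *	sacked_out -= acked - 1   =>   sacked_out = 3
		 *
		 * one segment filled the hole, the other two consume
		 * emulated dupacks.)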
*/ 2017 if (acked - 1 >= tp->sacked_out) 2018 tp->sacked_out = 0; 2019 else 2020 tp->sacked_out -= acked - 1; 2021 } 2022 tcp_check_reno_reordering(sk, acked); 2023 tcp_verify_left_out(tp); 2024 } 2025 2026 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 2027 { 2028 tp->sacked_out = 0; 2029 } 2030 2031 static int tcp_is_sackfrto(const struct tcp_sock *tp) 2032 { 2033 return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); 2034 } 2035 2036 /* F-RTO can only be used if TCP has never retransmitted anything other than 2037 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) 2038 */ 2039 bool tcp_use_frto(struct sock *sk) 2040 { 2041 const struct tcp_sock *tp = tcp_sk(sk); 2042 const struct inet_connection_sock *icsk = inet_csk(sk); 2043 struct sk_buff *skb; 2044 2045 if (!sysctl_tcp_frto) 2046 return false; 2047 2048 /* MTU probe and F-RTO won't really play nicely along currently */ 2049 if (icsk->icsk_mtup.probe_size) 2050 return false; 2051 2052 if (tcp_is_sackfrto(tp)) 2053 return true; 2054 2055 /* Avoid expensive walking of rexmit queue if possible */ 2056 if (tp->retrans_out > 1) 2057 return false; 2058 2059 skb = tcp_write_queue_head(sk); 2060 if (tcp_skb_is_last(sk, skb)) 2061 return true; 2062 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 2063 tcp_for_write_queue_from(skb, sk) { 2064 if (skb == tcp_send_head(sk)) 2065 break; 2066 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2067 return false; 2068 /* Short-circuit when first non-SACKed skb has been checked */ 2069 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2070 break; 2071 } 2072 return true; 2073 } 2074 2075 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO 2076 * recovery a bit and use heuristics in tcp_process_frto() to detect if 2077 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to 2078 * keep retrans_out counting accurate (with SACK F-RTO, other than head 2079 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS 2080 * bits are handled if the Loss state is really to be entered (in 2081 * tcp_enter_frto_loss). 2082 * 2083 * Do like tcp_enter_loss() would; when RTO expires the second time it 2084 * does: 2085 * "Reduce ssthresh if it has not yet been made inside this window." 2086 */ 2087 void tcp_enter_frto(struct sock *sk) 2088 { 2089 const struct inet_connection_sock *icsk = inet_csk(sk); 2090 struct tcp_sock *tp = tcp_sk(sk); 2091 struct sk_buff *skb; 2092 2093 if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || 2094 tp->snd_una == tp->high_seq || 2095 ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && 2096 !icsk->icsk_retransmits)) { 2097 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2098 /* Our state is too optimistic in ssthresh() call because cwnd 2099 * is not reduced until tcp_enter_frto_loss() when previous F-RTO 2100 * recovery has not yet completed. Pattern would be this: RTO, 2101 * Cumulative ACK, RTO (2xRTO for the same segment does not end 2102 * up here twice). 2103 * RFC4138 should be more specific on what to do, even though 2104 * RTO is quite unlikely to occur after the first Cumulative ACK 2105 * due to back-off and complexity of triggering events ... 2106 */ 2107 if (tp->frto_counter) { 2108 u32 stored_cwnd; 2109 stored_cwnd = tp->snd_cwnd; 2110 tp->snd_cwnd = 2; 2111 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2112 tp->snd_cwnd = stored_cwnd; 2113 } else { 2114 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2115 } 2116 /* ... 
		 * in theory, the cong. control module could do "any tricks" in
		 * ssthresh(), which means that ca_state, lost bits and lost_out
		 * counter would have to be faked before the call occurs. We
		 * consider that too expensive, unlikely and hacky, so modules
		 * using these in ssthresh() must deal with these incompatibility
		 * issues if they receive CA_EVENT_FRTO and frto_counter != 0
		 */
		tcp_ca_event(sk, CA_EVENT_FRTO);
	}

	tp->undo_marker = tp->snd_una;
	tp->undo_retrans = 0;

	skb = tcp_write_queue_head(sk);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
		tp->undo_marker = 0;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
	}
	tcp_verify_left_out(tp);

	/* Too bad if TCP was application limited */
	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);

	/* Earlier loss recovery underway (see RFC4138; Appendix B).
	 * The last condition is necessary at least in tp->frto_counter case.
	 */
	if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
	    ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
	    after(tp->high_seq, tp->snd_una)) {
		tp->frto_highmark = tp->high_seq;
	} else {
		tp->frto_highmark = tp->snd_nxt;
	}
	tcp_set_ca_state(sk, TCP_CA_Disorder);
	tp->high_seq = tp->snd_nxt;
	tp->frto_counter = 1;
}

/* Enter Loss state after F-RTO was applied. A dupack arrived after the RTO,
 * which indicates that we should follow the traditional RTO recovery,
 * i.e. mark everything lost and do go-back-N retransmission.
 */
static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	tp->lost_out = 0;
	tp->retrans_out = 0;
	if (tcp_is_reno(tp))
		tcp_reset_reno_sack(tp);

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		/*
		 * Count the retransmission made on RTO correctly (only when
		 * we are still waiting for the first ACK and have not yet
		 * received it)...
		 */
		if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
			/* For some reason this R-bit might get cleared? */
			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
				tp->retrans_out += tcp_skb_pcount(skb);
			/* ...enter this if branch just for the first segment */
			flag |= FLAG_DATA_ACKED;
		} else {
			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
				tp->undo_marker = 0;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		}

		/* Marking forward transmissions that were made after RTO as
		 * lost can cause unnecessary retransmissions in some scenarios;
		 * SACK blocks will mitigate that in some but not in all cases.
		 * We used to not mark them, but it was causing break-ups with
		 * receivers that do only in-order reception.
		 *
		 * TODO: we could detect the presence of such a receiver and
		 * select different behavior per flow.
2199 */ 2200 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2201 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2202 tp->lost_out += tcp_skb_pcount(skb); 2203 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 2204 } 2205 } 2206 tcp_verify_left_out(tp); 2207 2208 tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; 2209 tp->snd_cwnd_cnt = 0; 2210 tp->snd_cwnd_stamp = tcp_time_stamp; 2211 tp->frto_counter = 0; 2212 tp->bytes_acked = 0; 2213 2214 tp->reordering = min_t(unsigned int, tp->reordering, 2215 sysctl_tcp_reordering); 2216 tcp_set_ca_state(sk, TCP_CA_Loss); 2217 tp->high_seq = tp->snd_nxt; 2218 TCP_ECN_queue_cwr(tp); 2219 2220 tcp_clear_all_retrans_hints(tp); 2221 } 2222 2223 static void tcp_clear_retrans_partial(struct tcp_sock *tp) 2224 { 2225 tp->retrans_out = 0; 2226 tp->lost_out = 0; 2227 2228 tp->undo_marker = 0; 2229 tp->undo_retrans = 0; 2230 } 2231 2232 void tcp_clear_retrans(struct tcp_sock *tp) 2233 { 2234 tcp_clear_retrans_partial(tp); 2235 2236 tp->fackets_out = 0; 2237 tp->sacked_out = 0; 2238 } 2239 2240 /* Enter Loss state. If "how" is not zero, forget all SACK information 2241 * and reset tags completely, otherwise preserve SACKs. If receiver 2242 * dropped its ofo queue, we will know this due to reneging detection. 2243 */ 2244 void tcp_enter_loss(struct sock *sk, int how) 2245 { 2246 const struct inet_connection_sock *icsk = inet_csk(sk); 2247 struct tcp_sock *tp = tcp_sk(sk); 2248 struct sk_buff *skb; 2249 2250 /* Reduce ssthresh if it has not yet been made inside this window. */ 2251 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 2252 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 2253 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2254 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2255 tcp_ca_event(sk, CA_EVENT_LOSS); 2256 } 2257 tp->snd_cwnd = 1; 2258 tp->snd_cwnd_cnt = 0; 2259 tp->snd_cwnd_stamp = tcp_time_stamp; 2260 2261 tp->bytes_acked = 0; 2262 tcp_clear_retrans_partial(tp); 2263 2264 if (tcp_is_reno(tp)) 2265 tcp_reset_reno_sack(tp); 2266 2267 if (!how) { 2268 /* Push undo marker, if it was plain RTO and nothing 2269 * was retransmitted. */ 2270 tp->undo_marker = tp->snd_una; 2271 } else { 2272 tp->sacked_out = 0; 2273 tp->fackets_out = 0; 2274 } 2275 tcp_clear_all_retrans_hints(tp); 2276 2277 tcp_for_write_queue(skb, sk) { 2278 if (skb == tcp_send_head(sk)) 2279 break; 2280 2281 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2282 tp->undo_marker = 0; 2283 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 2284 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 2285 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 2286 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2287 tp->lost_out += tcp_skb_pcount(skb); 2288 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 2289 } 2290 } 2291 tcp_verify_left_out(tp); 2292 2293 tp->reordering = min_t(unsigned int, tp->reordering, 2294 sysctl_tcp_reordering); 2295 tcp_set_ca_state(sk, TCP_CA_Loss); 2296 tp->high_seq = tp->snd_nxt; 2297 TCP_ECN_queue_cwr(tp); 2298 /* Abort F-RTO algorithm if one is in progress */ 2299 tp->frto_counter = 0; 2300 } 2301 2302 /* If ACK arrived pointing to a remembered SACK, it means that our 2303 * remembered SACKs do not reflect real state of receiver i.e. 2304 * receiver _host_ is heavily congested (or buggy). 2305 * 2306 * Do processing similar to RTO timeout. 
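 * (Illustrative sequence, editor's sketch: snd_una advances over a range
 * we still have marked TCPCB_SACKED_ACKED, so FLAG_SACK_RENEGING gets set;
 * the handler below then enters Loss, retransmits the head skb and
 * restarts the retransmit timer, much as a real RTO would.)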
 */
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
	if (flag & FLAG_SACK_RENEGING) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);

		tcp_enter_loss(sk, 1);
		icsk->icsk_retransmits++;
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, TCP_RTO_MAX);
		return true;
	}
	return false;
}

static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}

/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
 * counter when SACK is enabled (without SACK, sacked_out is used for
 * that purpose).
 *
 * Instead, with FACK TCP uses fackets_out, which includes both SACKed
 * segments up to the highest received SACK block so far and holes in
 * between them.
 *
 * With reordering, holes may still be in flight, so RFC3517 recovery
 * uses pure sacked_out (total number of SACKed segments) even though
 * it violates the RFC, which uses duplicate ACKs. Often these are equal,
 * but when e.g. out-of-window ACKs or packet duplication occurs,
 * they differ. Since neither occurs due to loss, TCP should really
 * ignore them.
 */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}

static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long delay;

	/* Delay early retransmit and entering fast recovery for
	 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
	 * are available, or the RTO is scheduled to fire first.
	 */
	if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
		return false;

	delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
	if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
		return false;

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
	tp->early_retrans_delayed = 1;
	return true;
}

static inline int tcp_skb_timedout(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}

static inline int tcp_head_timedout(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->packets_out &&
	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
}

/* Linux NewReno/SACK/FACK/ECN state machine.
 * --------------------------------------
 *
 * "Open"	Normal state, no dubious events, fast path.
 * "Disorder"	In all the respects it is "Open",
 *		but requires a bit more attention. It is entered when
 *		we see some SACKs or dupacks. It is split off from "Open"
 *		mainly to move some processing from the fast path to the slow one.
 * "CWR"	CWND was reduced due to some Congestion Notification event.
 *		It can be ECN, ICMP source quench, local device congestion.
 * "Recovery"	CWND was reduced, we are fast-retransmitting.
 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
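 *
 * (Typical lifetime of a loss episode, an editor's illustration rather
 * than part of the original text:
 *
 *	Open -> Disorder	on the first SACK or dupack
 *	Disorder -> Recovery	once tcp_time_to_recover() agrees
 *	Recovery -> Open	when snd_una passes high_seq
 *
 * and an RTO from any state drops the connection into "Loss".)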
 *
 * tcp_fastretrans_alert() is entered:
 * - on each incoming ACK, if state is not "Open"
 * - when an arrived ACK is unusual, namely:
 *	* SACK
 *	* Duplicate ACK.
 *	* ECN ECE.
 *
 * Counting packets in flight is pretty simple.
 *
 *	in_flight = packets_out - left_out + retrans_out
 *
 *	packets_out is SND.NXT-SND.UNA counted in packets.
 *
 *	retrans_out is the number of retransmitted segments.
 *
 *	left_out is the number of segments that left the network,
 *	but are not ACKed yet.
 *
 *		left_out = sacked_out + lost_out
 *
 *	sacked_out: Packets which arrived at the receiver out of order
 *		   and hence were not ACKed. With SACKs this number is simply
 *		   the amount of SACKed data. Even without SACKs
 *		   it is easy to give a pretty reliable estimate of this number
 *		   by counting duplicate ACKs.
 *
 *	lost_out: Packets lost by the network. TCP has no explicit
 *		   "loss notification" feedback from the network (for now).
 *		   It means that this number can only be _guessed_.
 *		   Actually, it is the heuristics to predict lossage that
 *		   distinguish the different algorithms.
 *
 *	F.e. after an RTO, when all the queue is considered as lost,
 *	lost_out = packets_out and in_flight = retrans_out.
 *
 *		Essentially, we now have two algorithms for counting
 *		lost packets.
 *
 *		FACK: It is the simplest heuristic. As soon as we decide
 *		that something is lost, we decide that _all_ not-SACKed
 *		packets until the most forward SACK are lost. I.e.
 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
 *		It is an absolutely correct estimate, if the network does not
 *		reorder packets. And it loses any connection to reality when
 *		reordering takes place. We use FACK by default until reordering
 *		is suspected on the path to this destination.
 *
 *		NewReno: when Recovery is entered, we assume that one segment
 *		is lost (classic Reno). While we are in Recovery and
 *		a partial ACK arrives, we assume that one more packet
 *		is lost (NewReno). These heuristics are the same in NewReno
 *		and SACK.
 *
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation,
 *  deflation etc. CWND is the real congestion window, never inflated; it
 *  changes only according to classic VJ rules.
 *
 *  The really tricky (and requiring careful tuning) part of the algorithm
 *  is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 *  The first determines the moment _when_ we should reduce CWND and,
 *  hence, slow down forward transmission. In fact, it determines the moment
 *  when we decide that a hole is caused by loss, rather than by a reorder.
 *
 *  tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
 *  holes caused by lost packets.
 *
 *  And the most logically complicated part of the algorithm is the undo
 *  heuristics. We detect false retransmits due to both too early
 *  fast retransmit (reordering) and underestimated RTO, by analyzing
 *  timestamps and D-SACKs. When we detect that some segments were
 *  retransmitted by mistake and the CWND reduction was wrong, we undo the
 *  window reduction and abort the recovery phase. This logic is hidden
 *  inside several functions named tcp_try_undo_<something>.
 */

/* This function decides when we should leave Disordered state
 * and enter the Recovery phase, reducing the congestion window.
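 *
 * (Worked example, editor's sketch: with tp->reordering = 3 and FACK,
 * fackets_out = 4 -- the highest SACK sits four segments above snd_una --
 * already satisfies tcp_dupack_heuristics(tp) > tp->reordering, so we
 * recover; non-FACK RFC3517 uses sacked_out + 1 for the same test.)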
2473 * 2474 * Main question: may we further continue forward transmission 2475 * with the same cwnd? 2476 */ 2477 static bool tcp_time_to_recover(struct sock *sk, int flag) 2478 { 2479 struct tcp_sock *tp = tcp_sk(sk); 2480 __u32 packets_out; 2481 2482 /* Do not perform any recovery during F-RTO algorithm */ 2483 if (tp->frto_counter) 2484 return false; 2485 2486 /* Trick#1: The loss is proven. */ 2487 if (tp->lost_out) 2488 return true; 2489 2490 /* Not-A-Trick#2 : Classic rule... */ 2491 if (tcp_dupack_heuristics(tp) > tp->reordering) 2492 return true; 2493 2494 /* Trick#3 : when we use RFC2988 timer restart, fast 2495 * retransmit can be triggered by timeout of queue head. 2496 */ 2497 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 2498 return true; 2499 2500 /* Trick#4: It is still not OK... But will it be useful to delay 2501 * recovery more? 2502 */ 2503 packets_out = tp->packets_out; 2504 if (packets_out <= tp->reordering && 2505 tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 2506 !tcp_may_send_now(sk)) { 2507 /* We have nothing to send. This connection is limited 2508 * either by receiver window or by application. 2509 */ 2510 return true; 2511 } 2512 2513 /* If a thin stream is detected, retransmit after first 2514 * received dupack. Employ only if SACK is supported in order 2515 * to avoid possible corner-case series of spurious retransmissions 2516 * Use only if there are no unsent data. 2517 */ 2518 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && 2519 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && 2520 tcp_is_sack(tp) && !tcp_send_head(sk)) 2521 return true; 2522 2523 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious 2524 * retransmissions due to small network reorderings, we implement 2525 * Mitigation A.3 in the RFC and delay the retransmission for a short 2526 * interval if appropriate. 2527 */ 2528 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && 2529 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && 2530 !tcp_may_send_now(sk)) 2531 return !tcp_pause_early_retransmit(sk, flag); 2532 2533 return false; 2534 } 2535 2536 /* New heuristics: it is possible only after we switched to restart timer 2537 * each time when something is ACKed. Hence, we can detect timed out packets 2538 * during fast retransmit without falling to slow start. 2539 * 2540 * Usefulness of this as is very questionable, since we should know which of 2541 * the segments is the next to timeout which is relatively expensive to find 2542 * in general case unless we add some data structure just for that. The 2543 * current approach certainly won't find the right one too often and when it 2544 * finally does find _something_ it usually marks large part of the window 2545 * right away (because a retransmission with a larger timestamp blocks the 2546 * loop from advancing). 
 *							-ij
 */
static void tcp_timeout_skbs(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
		return;

	skb = tp->scoreboard_skb_hint;
	if (tp->scoreboard_skb_hint == NULL)
		skb = tcp_write_queue_head(sk);

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (!tcp_skb_timedout(sk, skb))
			break;

		tcp_skb_mark_lost(tp, skb);
	}

	tp->scoreboard_skb_hint = skb;

	tcp_verify_left_out(tp);
}

/* Detect loss in event "A" above by marking the head of the queue as lost.
 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = skb_shinfo(skb)->gso_size;
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account for newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}

	tcp_timeout_skbs(sk);
}

/* CWND moderation: prevents bursts due to too-big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* The lower bound on the congestion window is the slow start threshold,
 * unless the congestion avoidance choice decides to override it.
 */
static inline u32 tcp_cwnd_min(const struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;

	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
}

/* Decrease cwnd on each second ACK. */
static void tcp_cwnd_down(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int decr = tp->snd_cwnd_cnt + 1;

	if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
	    (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
		tp->snd_cwnd_cnt = decr & 1;
		decr >>= 1;

		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
			tp->snd_cwnd -= decr;

		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}
}

/* Nothing was retransmitted, or the returned timestamp is less
 * than the timestamp of the first retransmission.
 */
static inline int tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}

/* Undo procedures. */

#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, const char *msg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);

	if (sk->sk_family == AF_INET) {
		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
			 msg,
			 &inet->inet_daddr, ntohs(inet->inet_dport),
			 tp->snd_cwnd, tcp_left_out(tp),
			 tp->snd_ssthresh, tp->prior_ssthresh,
			 tp->packets_out);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
			 msg,
			 &np->daddr, ntohs(inet->inet_dport),
			 tp->snd_cwnd, tcp_left_out(tp),
			 tp->snd_ssthresh, tp->prior_ssthresh,
			 tp->packets_out);
	}
#endif
}
#else
#define DBGUNDO(x...) do { } while (0)
#endif

static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			TCP_ECN_withdraw_cwr(tp);
		}
	} else {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static inline int tcp_may_undo(const struct tcp_sock *tp)
{
	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		int mib_idx;

		/* Happy end!
		 * We did not retransmit anything,
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwr(sk, true);
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
			mib_idx = LINUX_MIB_TCPLOSSUNDO;
		else
			mib_idx = LINUX_MIB_TCPFULLUNDO;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
		tp->undo_marker = 0;
	}
	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
		/* Hold the old state until something *above* high_seq
		 * is ACKed. For Reno it is a MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe. */
		tcp_moderate_cwnd(tp);
		return true;
	}
	tcp_set_ca_state(sk, TCP_CA_Open);
	return false;
}

/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && !tp->undo_retrans) {
		DBGUNDO(sk, "D-SACK");
		tcp_undo_cwr(sk, true);
		tp->undo_marker = 0;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
	}
}

/* We can clear retrans_stamp when there are no retransmissions in the
 * window. It would seem that it is trivially available for us in
 * tp->retrans_out, however, that kind of assumption doesn't consider
 * what will happen if errors occur when sending a retransmission for the
 * second time. ...It could be that such a segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough except for some reneging corner cases that
 * are not worth the effort.
 *
 * The main reason for all this complexity is the fact that the connection
 * dying time now depends on the validity of the retrans_stamp, in particular,
 * that successive retransmissions of a segment must not advance
 * retrans_stamp under any conditions.
 */
static bool tcp_any_retrans_done(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (tp->retrans_out)
		return true;

	skb = tcp_write_queue_head(sk);
	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
		return true;

	return false;
}

/* Undo during fast recovery after a partial ACK. */

static int tcp_try_undo_partial(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Partial ACK arrived. Force Hoe's retransmit. */
	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);

	if (tcp_may_undo(tp)) {
		/* Plain luck! The hole was filled with a delayed
		 * packet, rather than with a retransmit.
		 */
		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		DBGUNDO(sk, "Hoe");
		tcp_undo_cwr(sk, false);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * are most probably delayed as well.
		 */
		failed = 0;
	}
	return failed;
}

/* Undo during loss recovery after a partial ACK.
*/ 2876 static bool tcp_try_undo_loss(struct sock *sk) 2877 { 2878 struct tcp_sock *tp = tcp_sk(sk); 2879 2880 if (tcp_may_undo(tp)) { 2881 struct sk_buff *skb; 2882 tcp_for_write_queue(skb, sk) { 2883 if (skb == tcp_send_head(sk)) 2884 break; 2885 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2886 } 2887 2888 tcp_clear_all_retrans_hints(tp); 2889 2890 DBGUNDO(sk, "partial loss"); 2891 tp->lost_out = 0; 2892 tcp_undo_cwr(sk, true); 2893 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2894 inet_csk(sk)->icsk_retransmits = 0; 2895 tp->undo_marker = 0; 2896 if (tcp_is_sack(tp)) 2897 tcp_set_ca_state(sk, TCP_CA_Open); 2898 return true; 2899 } 2900 return false; 2901 } 2902 2903 static inline void tcp_complete_cwr(struct sock *sk) 2904 { 2905 struct tcp_sock *tp = tcp_sk(sk); 2906 2907 /* Do not moderate cwnd if it's already undone in cwr or recovery. */ 2908 if (tp->undo_marker) { 2909 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { 2910 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2911 tp->snd_cwnd_stamp = tcp_time_stamp; 2912 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { 2913 /* PRR algorithm. */ 2914 tp->snd_cwnd = tp->snd_ssthresh; 2915 tp->snd_cwnd_stamp = tcp_time_stamp; 2916 } 2917 } 2918 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2919 } 2920 2921 static void tcp_try_keep_open(struct sock *sk) 2922 { 2923 struct tcp_sock *tp = tcp_sk(sk); 2924 int state = TCP_CA_Open; 2925 2926 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) 2927 state = TCP_CA_Disorder; 2928 2929 if (inet_csk(sk)->icsk_ca_state != state) { 2930 tcp_set_ca_state(sk, state); 2931 tp->high_seq = tp->snd_nxt; 2932 } 2933 } 2934 2935 static void tcp_try_to_open(struct sock *sk, int flag) 2936 { 2937 struct tcp_sock *tp = tcp_sk(sk); 2938 2939 tcp_verify_left_out(tp); 2940 2941 if (!tp->frto_counter && !tcp_any_retrans_done(sk)) 2942 tp->retrans_stamp = 0; 2943 2944 if (flag & FLAG_ECE) 2945 tcp_enter_cwr(sk, 1); 2946 2947 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2948 tcp_try_keep_open(sk); 2949 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 2950 tcp_moderate_cwnd(tp); 2951 } else { 2952 tcp_cwnd_down(sk, flag); 2953 } 2954 } 2955 2956 static void tcp_mtup_probe_failed(struct sock *sk) 2957 { 2958 struct inet_connection_sock *icsk = inet_csk(sk); 2959 2960 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2961 icsk->icsk_mtup.probe_size = 0; 2962 } 2963 2964 static void tcp_mtup_probe_success(struct sock *sk) 2965 { 2966 struct tcp_sock *tp = tcp_sk(sk); 2967 struct inet_connection_sock *icsk = inet_csk(sk); 2968 2969 /* FIXME: breaks with very large cwnd */ 2970 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2971 tp->snd_cwnd = tp->snd_cwnd * 2972 tcp_mss_to_mtu(sk, tp->mss_cache) / 2973 icsk->icsk_mtup.probe_size; 2974 tp->snd_cwnd_cnt = 0; 2975 tp->snd_cwnd_stamp = tcp_time_stamp; 2976 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2977 2978 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2979 icsk->icsk_mtup.probe_size = 0; 2980 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2981 } 2982 2983 /* Do a simple retransmit without using the backoff mechanisms in 2984 * tcp_timer. This is used for path mtu discovery. 2985 * The socket is already locked here. 
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk);
	u32 prior_lost = tp->lost_out;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (tcp_skb_seglen(skb) > mss &&
		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			tcp_skb_mark_lost_uncond_verify(tp, skb);
		}
	}

	tcp_clear_retrans_hints_partial(tp);

	if (prior_lost == tp->lost_out)
		return;

	if (tcp_is_reno(tp))
		tcp_limit_reno_sacked(tp);

	tcp_verify_left_out(tp);

	/* Don't muck with the congestion window here.
	 * The reason is that we do not increase the amount of _data_
	 * in the network, but the units changed, and the effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

/* This function implements the PRR algorithm, specifically the PRR-SSRB
 * (proportional rate reduction with slow start reduction bound) as described in
 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
 * It computes the number of packets to send (sndcnt) based on packets newly
 * delivered:
 *   1) If the number of packets in flight is larger than ssthresh, PRR spreads
 *	the cwnd reductions across a full RTT.
 *   2) If the number of packets in flight is lower than ssthresh (such as due
 *	to excess losses and/or application stalls), do not perform any further
 *	cwnd reductions, but instead slow start up to ssthresh.
 * (For example, with prior_cwnd = 10 and ssthresh = 5, case 1 releases
 * roughly one new segment for every two delivered, halving cwnd over
 * about one RTT.)
 */
static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
					int fast_rexmit, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);

	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
			       tp->prior_cwnd - 1;
		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
	} else {
		sndcnt = min_t(int, delta,
			       max_t(int, tp->prr_delivered - tp->prr_out,
				     newly_acked_sacked) + 1);
	}

	sndcnt = max(sndcnt, (fast_rexmit ?
1 : 0)); 3063 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 3064 } 3065 3066 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) 3067 { 3068 struct tcp_sock *tp = tcp_sk(sk); 3069 int mib_idx; 3070 3071 if (tcp_is_reno(tp)) 3072 mib_idx = LINUX_MIB_TCPRENORECOVERY; 3073 else 3074 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 3075 3076 NET_INC_STATS_BH(sock_net(sk), mib_idx); 3077 3078 tp->high_seq = tp->snd_nxt; 3079 tp->prior_ssthresh = 0; 3080 tp->undo_marker = tp->snd_una; 3081 tp->undo_retrans = tp->retrans_out; 3082 3083 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 3084 if (!ece_ack) 3085 tp->prior_ssthresh = tcp_current_ssthresh(sk); 3086 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 3087 TCP_ECN_queue_cwr(tp); 3088 } 3089 3090 tp->bytes_acked = 0; 3091 tp->snd_cwnd_cnt = 0; 3092 tp->prior_cwnd = tp->snd_cwnd; 3093 tp->prr_delivered = 0; 3094 tp->prr_out = 0; 3095 tcp_set_ca_state(sk, TCP_CA_Recovery); 3096 } 3097 3098 /* Process an event, which can update packets-in-flight not trivially. 3099 * Main goal of this function is to calculate new estimate for left_out, 3100 * taking into account both packets sitting in receiver's buffer and 3101 * packets lost by network. 3102 * 3103 * Besides that it does CWND reduction, when packet loss is detected 3104 * and changes state of machine. 3105 * 3106 * It does _not_ decide what to send, it is made in function 3107 * tcp_xmit_retransmit_queue(). 3108 */ 3109 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 3110 int newly_acked_sacked, bool is_dupack, 3111 int flag) 3112 { 3113 struct inet_connection_sock *icsk = inet_csk(sk); 3114 struct tcp_sock *tp = tcp_sk(sk); 3115 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 3116 (tcp_fackets_out(tp) > tp->reordering)); 3117 int fast_rexmit = 0; 3118 3119 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 3120 tp->sacked_out = 0; 3121 if (WARN_ON(!tp->sacked_out && tp->fackets_out)) 3122 tp->fackets_out = 0; 3123 3124 /* Now state machine starts. 3125 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 3126 if (flag & FLAG_ECE) 3127 tp->prior_ssthresh = 0; 3128 3129 /* B. In all the states check for reneging SACKs. */ 3130 if (tcp_check_sack_reneging(sk, flag)) 3131 return; 3132 3133 /* C. Check consistency of the current state. */ 3134 tcp_verify_left_out(tp); 3135 3136 /* D. Check state exit conditions. State can be terminated 3137 * when high_seq is ACKed. */ 3138 if (icsk->icsk_ca_state == TCP_CA_Open) { 3139 WARN_ON(tp->retrans_out != 0); 3140 tp->retrans_stamp = 0; 3141 } else if (!before(tp->snd_una, tp->high_seq)) { 3142 switch (icsk->icsk_ca_state) { 3143 case TCP_CA_Loss: 3144 icsk->icsk_retransmits = 0; 3145 if (tcp_try_undo_recovery(sk)) 3146 return; 3147 break; 3148 3149 case TCP_CA_CWR: 3150 /* CWR is to be held something *above* high_seq 3151 * is ACKed for CWR bit to reach receiver. */ 3152 if (tp->snd_una != tp->high_seq) { 3153 tcp_complete_cwr(sk); 3154 tcp_set_ca_state(sk, TCP_CA_Open); 3155 } 3156 break; 3157 3158 case TCP_CA_Recovery: 3159 if (tcp_is_reno(tp)) 3160 tcp_reset_reno_sack(tp); 3161 if (tcp_try_undo_recovery(sk)) 3162 return; 3163 tcp_complete_cwr(sk); 3164 break; 3165 } 3166 } 3167 3168 /* E. Process state. 
*/ 3169 switch (icsk->icsk_ca_state) { 3170 case TCP_CA_Recovery: 3171 if (!(flag & FLAG_SND_UNA_ADVANCED)) { 3172 if (tcp_is_reno(tp) && is_dupack) 3173 tcp_add_reno_sack(sk); 3174 } else 3175 do_lost = tcp_try_undo_partial(sk, pkts_acked); 3176 break; 3177 case TCP_CA_Loss: 3178 if (flag & FLAG_DATA_ACKED) 3179 icsk->icsk_retransmits = 0; 3180 if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) 3181 tcp_reset_reno_sack(tp); 3182 if (!tcp_try_undo_loss(sk)) { 3183 tcp_moderate_cwnd(tp); 3184 tcp_xmit_retransmit_queue(sk); 3185 return; 3186 } 3187 if (icsk->icsk_ca_state != TCP_CA_Open) 3188 return; 3189 /* Loss is undone; fall through to processing in Open state. */ 3190 default: 3191 if (tcp_is_reno(tp)) { 3192 if (flag & FLAG_SND_UNA_ADVANCED) 3193 tcp_reset_reno_sack(tp); 3194 if (is_dupack) 3195 tcp_add_reno_sack(sk); 3196 } 3197 3198 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3199 tcp_try_undo_dsack(sk); 3200 3201 if (!tcp_time_to_recover(sk, flag)) { 3202 tcp_try_to_open(sk, flag); 3203 return; 3204 } 3205 3206 /* MTU probe failure: don't reduce cwnd */ 3207 if (icsk->icsk_ca_state < TCP_CA_CWR && 3208 icsk->icsk_mtup.probe_size && 3209 tp->snd_una == tp->mtu_probe.probe_seq_start) { 3210 tcp_mtup_probe_failed(sk); 3211 /* Restores the reduction we did in tcp_mtup_probe() */ 3212 tp->snd_cwnd++; 3213 tcp_simple_retransmit(sk); 3214 return; 3215 } 3216 3217 /* Otherwise enter Recovery state */ 3218 tcp_enter_recovery(sk, (flag & FLAG_ECE)); 3219 fast_rexmit = 1; 3220 } 3221 3222 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 3223 tcp_update_scoreboard(sk, fast_rexmit); 3224 tp->prr_delivered += newly_acked_sacked; 3225 tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); 3226 tcp_xmit_retransmit_queue(sk); 3227 } 3228 3229 void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) 3230 { 3231 tcp_rtt_estimator(sk, seq_rtt); 3232 tcp_set_rto(sk); 3233 inet_csk(sk)->icsk_backoff = 0; 3234 } 3235 EXPORT_SYMBOL(tcp_valid_rtt_meas); 3236 3237 /* Read draft-ietf-tcplw-high-performance before mucking 3238 * with this code. (Supersedes RFC1323) 3239 */ 3240 static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 3241 { 3242 /* RTTM Rule: A TSecr value received in a segment is used to 3243 * update the averaged RTT measurement only if the segment 3244 * acknowledges some new data, i.e., only if it advances the 3245 * left edge of the send window. 3246 * 3247 * See draft-ietf-tcplw-high-performance-00, section 3.3. 3248 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 3249 * 3250 * Changed: reset backoff as soon as we see the first valid sample. 3251 * If we do not, we get strongly overestimated rto. With timestamps 3252 * samples are accepted even from very old segments: f.e., when rtt=1 3253 * increases to 8, we retransmit 5 times and after 8 seconds delayed 3254 * answer arrives rto becomes 120 seconds! If at least one of segments 3255 * in window is lost... Voila. --ANK (010210) 3256 */ 3257 struct tcp_sock *tp = tcp_sk(sk); 3258 3259 tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); 3260 } 3261 3262 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 3263 { 3264 /* We don't have a timestamp. Can only use 3265 * packets that are not retransmitted to determine 3266 * rtt estimates. Also, we must not reset the 3267 * backoff for rto until we get a non-retransmitted 3268 * packet. This allows us to deal with a situation 3269 * where the network delay has increased suddenly. 3270 * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 
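	 * (Illustrative case, editor's sketch: the head segment was
	 * retransmitted and the ACK now arriving may belong to either
	 * transmission; the sample is ambiguous, FLAG_RETRANS_DATA_ACKED
	 * is set, and the measurement below is skipped.)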
3271 */ 3272 3273 if (flag & FLAG_RETRANS_DATA_ACKED) 3274 return; 3275 3276 tcp_valid_rtt_meas(sk, seq_rtt); 3277 } 3278 3279 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 3280 const s32 seq_rtt) 3281 { 3282 const struct tcp_sock *tp = tcp_sk(sk); 3283 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 3284 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 3285 tcp_ack_saw_tstamp(sk, flag); 3286 else if (seq_rtt >= 0) 3287 tcp_ack_no_tstamp(sk, seq_rtt, flag); 3288 } 3289 3290 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 3291 { 3292 const struct inet_connection_sock *icsk = inet_csk(sk); 3293 icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); 3294 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 3295 } 3296 3297 /* Restart timer after forward progress on connection. 3298 * RFC2988 recommends to restart timer to now+rto. 3299 */ 3300 void tcp_rearm_rto(struct sock *sk) 3301 { 3302 struct tcp_sock *tp = tcp_sk(sk); 3303 3304 if (!tp->packets_out) { 3305 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3306 } else { 3307 u32 rto = inet_csk(sk)->icsk_rto; 3308 /* Offset the time elapsed after installing regular RTO */ 3309 if (tp->early_retrans_delayed) { 3310 struct sk_buff *skb = tcp_write_queue_head(sk); 3311 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; 3312 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); 3313 /* delta may not be positive if the socket is locked 3314 * when the delayed ER timer fires and is rescheduled. 3315 */ 3316 if (delta > 0) 3317 rto = delta; 3318 } 3319 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 3320 TCP_RTO_MAX); 3321 } 3322 tp->early_retrans_delayed = 0; 3323 } 3324 3325 /* This function is called when the delayed ER timer fires. TCP enters 3326 * fast recovery and performs fast-retransmit. 3327 */ 3328 void tcp_resume_early_retransmit(struct sock *sk) 3329 { 3330 struct tcp_sock *tp = tcp_sk(sk); 3331 3332 tcp_rearm_rto(sk); 3333 3334 /* Stop if ER is disabled after the delayed ER timer is scheduled */ 3335 if (!tp->do_early_retrans) 3336 return; 3337 3338 tcp_enter_recovery(sk, false); 3339 tcp_update_scoreboard(sk, 1); 3340 tcp_xmit_retransmit_queue(sk); 3341 } 3342 3343 /* If we get here, the whole TSO packet has not been acked. */ 3344 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 3345 { 3346 struct tcp_sock *tp = tcp_sk(sk); 3347 u32 packets_acked; 3348 3349 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 3350 3351 packets_acked = tcp_skb_pcount(skb); 3352 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 3353 return 0; 3354 packets_acked -= tcp_skb_pcount(skb); 3355 3356 if (packets_acked) { 3357 BUG_ON(tcp_skb_pcount(skb) == 0); 3358 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 3359 } 3360 3361 return packets_acked; 3362 } 3363 3364 /* Remove acknowledged frames from the retransmission queue. If our packet 3365 * is before the ack sequence we can discard it as it's confirmed to have 3366 * arrived at the other end. 
3367 */ 3368 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, 3369 u32 prior_snd_una) 3370 { 3371 struct tcp_sock *tp = tcp_sk(sk); 3372 const struct inet_connection_sock *icsk = inet_csk(sk); 3373 struct sk_buff *skb; 3374 u32 now = tcp_time_stamp; 3375 int fully_acked = true; 3376 int flag = 0; 3377 u32 pkts_acked = 0; 3378 u32 reord = tp->packets_out; 3379 u32 prior_sacked = tp->sacked_out; 3380 s32 seq_rtt = -1; 3381 s32 ca_seq_rtt = -1; 3382 ktime_t last_ackt = net_invalid_timestamp(); 3383 3384 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 3385 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3386 u32 acked_pcount; 3387 u8 sacked = scb->sacked; 3388 3389 /* Determine how many packets and what bytes were acked, tso and else */ 3390 if (after(scb->end_seq, tp->snd_una)) { 3391 if (tcp_skb_pcount(skb) == 1 || 3392 !after(tp->snd_una, scb->seq)) 3393 break; 3394 3395 acked_pcount = tcp_tso_acked(sk, skb); 3396 if (!acked_pcount) 3397 break; 3398 3399 fully_acked = false; 3400 } else { 3401 acked_pcount = tcp_skb_pcount(skb); 3402 } 3403 3404 if (sacked & TCPCB_RETRANS) { 3405 if (sacked & TCPCB_SACKED_RETRANS) 3406 tp->retrans_out -= acked_pcount; 3407 flag |= FLAG_RETRANS_DATA_ACKED; 3408 ca_seq_rtt = -1; 3409 seq_rtt = -1; 3410 if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) 3411 flag |= FLAG_NONHEAD_RETRANS_ACKED; 3412 } else { 3413 ca_seq_rtt = now - scb->when; 3414 last_ackt = skb->tstamp; 3415 if (seq_rtt < 0) { 3416 seq_rtt = ca_seq_rtt; 3417 } 3418 if (!(sacked & TCPCB_SACKED_ACKED)) 3419 reord = min(pkts_acked, reord); 3420 } 3421 3422 if (sacked & TCPCB_SACKED_ACKED) 3423 tp->sacked_out -= acked_pcount; 3424 if (sacked & TCPCB_LOST) 3425 tp->lost_out -= acked_pcount; 3426 3427 tp->packets_out -= acked_pcount; 3428 pkts_acked += acked_pcount; 3429 3430 /* Initial outgoing SYN's get put onto the write_queue 3431 * just like anything else we transmit. It is not 3432 * true data, and if we misinform our callers that 3433 * this ACK acks real data, we will erroneously exit 3434 * connection startup slow start one packet too 3435 * quickly. This is severely frowned upon behavior. 3436 */ 3437 if (!(scb->tcp_flags & TCPHDR_SYN)) { 3438 flag |= FLAG_DATA_ACKED; 3439 } else { 3440 flag |= FLAG_SYN_ACKED; 3441 tp->retrans_stamp = 0; 3442 } 3443 3444 if (!fully_acked) 3445 break; 3446 3447 tcp_unlink_write_queue(skb, sk); 3448 sk_wmem_free_skb(sk, skb); 3449 tp->scoreboard_skb_hint = NULL; 3450 if (skb == tp->retransmit_skb_hint) 3451 tp->retransmit_skb_hint = NULL; 3452 if (skb == tp->lost_skb_hint) 3453 tp->lost_skb_hint = NULL; 3454 } 3455 3456 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3457 tp->snd_up = tp->snd_una; 3458 3459 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3460 flag |= FLAG_SACK_RENEGING; 3461 3462 if (flag & FLAG_ACKED) { 3463 const struct tcp_congestion_ops *ca_ops 3464 = inet_csk(sk)->icsk_ca_ops; 3465 3466 if (unlikely(icsk->icsk_mtup.probe_size && 3467 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3468 tcp_mtup_probe_success(sk); 3469 } 3470 3471 tcp_ack_update_rtt(sk, flag, seq_rtt); 3472 tcp_rearm_rto(sk); 3473 3474 if (tcp_is_reno(tp)) { 3475 tcp_remove_reno_sacks(sk, pkts_acked); 3476 } else { 3477 int delta; 3478 3479 /* Non-retransmitted hole got filled? That's reordering */ 3480 if (reord < prior_fackets) 3481 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3482 3483 delta = tcp_is_fack(tp) ? 
pkts_acked : 3484 prior_sacked - tp->sacked_out; 3485 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3486 } 3487 3488 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 3489 3490 if (ca_ops->pkts_acked) { 3491 s32 rtt_us = -1; 3492 3493 /* Is the ACK triggering packet unambiguous? */ 3494 if (!(flag & FLAG_RETRANS_DATA_ACKED)) { 3495 /* High resolution needed and available? */ 3496 if (ca_ops->flags & TCP_CONG_RTT_STAMP && 3497 !ktime_equal(last_ackt, 3498 net_invalid_timestamp())) 3499 rtt_us = ktime_us_delta(ktime_get_real(), 3500 last_ackt); 3501 else if (ca_seq_rtt >= 0) 3502 rtt_us = jiffies_to_usecs(ca_seq_rtt); 3503 } 3504 3505 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 3506 } 3507 } 3508 3509 #if FASTRETRANS_DEBUG > 0 3510 WARN_ON((int)tp->sacked_out < 0); 3511 WARN_ON((int)tp->lost_out < 0); 3512 WARN_ON((int)tp->retrans_out < 0); 3513 if (!tp->packets_out && tcp_is_sack(tp)) { 3514 icsk = inet_csk(sk); 3515 if (tp->lost_out) { 3516 pr_debug("Leak l=%u %d\n", 3517 tp->lost_out, icsk->icsk_ca_state); 3518 tp->lost_out = 0; 3519 } 3520 if (tp->sacked_out) { 3521 pr_debug("Leak s=%u %d\n", 3522 tp->sacked_out, icsk->icsk_ca_state); 3523 tp->sacked_out = 0; 3524 } 3525 if (tp->retrans_out) { 3526 pr_debug("Leak r=%u %d\n", 3527 tp->retrans_out, icsk->icsk_ca_state); 3528 tp->retrans_out = 0; 3529 } 3530 } 3531 #endif 3532 return flag; 3533 } 3534 3535 static void tcp_ack_probe(struct sock *sk) 3536 { 3537 const struct tcp_sock *tp = tcp_sk(sk); 3538 struct inet_connection_sock *icsk = inet_csk(sk); 3539 3540 /* Was it a usable window open? */ 3541 3542 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { 3543 icsk->icsk_backoff = 0; 3544 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 3545 /* Socket must be waked up by subsequent tcp_data_snd_check(). 3546 * This function is not for random using! 3547 */ 3548 } else { 3549 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3550 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3551 TCP_RTO_MAX); 3552 } 3553 } 3554 3555 static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3556 { 3557 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3558 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; 3559 } 3560 3561 static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3562 { 3563 const struct tcp_sock *tp = tcp_sk(sk); 3564 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3565 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 3566 } 3567 3568 /* Check that window update is acceptable. 3569 * The function assumes that snd_una<=ack<=snd_next. 3570 */ 3571 static inline int tcp_may_update_window(const struct tcp_sock *tp, 3572 const u32 ack, const u32 ack_seq, 3573 const u32 nwin) 3574 { 3575 return after(ack, tp->snd_una) || 3576 after(ack_seq, tp->snd_wl1) || 3577 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); 3578 } 3579 3580 /* Update our send window. 3581 * 3582 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3583 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 
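 *
 * Instead we accept an update only under the checks done in
 * tcp_may_update_window() above: the ACK advances snd_una, or it
 * carries a newer sequence than snd_wl1, or it repeats snd_wl1 while
 * advertising a strictly larger window -- a stricter variant of the
 * SND.WL1/SND.WL2 test of RFC793.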
3584 */ 3585 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3586 u32 ack_seq) 3587 { 3588 struct tcp_sock *tp = tcp_sk(sk); 3589 int flag = 0; 3590 u32 nwin = ntohs(tcp_hdr(skb)->window); 3591 3592 if (likely(!tcp_hdr(skb)->syn)) 3593 nwin <<= tp->rx_opt.snd_wscale; 3594 3595 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3596 flag |= FLAG_WIN_UPDATE; 3597 tcp_update_wl(tp, ack_seq); 3598 3599 if (tp->snd_wnd != nwin) { 3600 tp->snd_wnd = nwin; 3601 3602 /* Note, it is the only place, where 3603 * fast path is recovered for sending TCP. 3604 */ 3605 tp->pred_flags = 0; 3606 tcp_fast_path_check(sk); 3607 3608 if (nwin > tp->max_window) { 3609 tp->max_window = nwin; 3610 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3611 } 3612 } 3613 } 3614 3615 tp->snd_una = ack; 3616 3617 return flag; 3618 } 3619 3620 /* A very conservative spurious RTO response algorithm: reduce cwnd and 3621 * continue in congestion avoidance. 3622 */ 3623 static void tcp_conservative_spur_to_response(struct tcp_sock *tp) 3624 { 3625 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3626 tp->snd_cwnd_cnt = 0; 3627 tp->bytes_acked = 0; 3628 TCP_ECN_queue_cwr(tp); 3629 tcp_moderate_cwnd(tp); 3630 } 3631 3632 /* A conservative spurious RTO response algorithm: reduce cwnd using 3633 * rate halving and continue in congestion avoidance. 3634 */ 3635 static void tcp_ratehalving_spur_to_response(struct sock *sk) 3636 { 3637 tcp_enter_cwr(sk, 0); 3638 } 3639 3640 static void tcp_undo_spur_to_response(struct sock *sk, int flag) 3641 { 3642 if (flag & FLAG_ECE) 3643 tcp_ratehalving_spur_to_response(sk); 3644 else 3645 tcp_undo_cwr(sk, true); 3646 } 3647 3648 /* F-RTO spurious RTO detection algorithm (RFC4138) 3649 * 3650 * F-RTO affects during two new ACKs following RTO (well, almost, see inline 3651 * comments). State (ACK number) is kept in frto_counter. When ACK advances 3652 * window (but not to or beyond highest sequence sent before RTO): 3653 * On First ACK, send two new segments out. 3654 * On Second ACK, RTO was likely spurious. Do spurious response (response 3655 * algorithm is not part of the F-RTO detection algorithm 3656 * given in RFC4138 but can be selected separately). 3657 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss 3658 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding 3659 * of Nagle, this is done using frto_counter states 2 and 3, when a new data 3660 * segment of any size sent during F-RTO, state 2 is upgraded to 3. 3661 * 3662 * Rationale: if the RTO was spurious, new ACKs should arrive from the 3663 * original window even after we transmit two new data segments. 3664 * 3665 * SACK version: 3666 * on first step, wait until first cumulative ACK arrives, then move to 3667 * the second step. In second step, the next ACK decides. 3668 * 3669 * F-RTO is implemented (mainly) in four functions: 3670 * - tcp_use_frto() is used to determine if TCP is can use F-RTO 3671 * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is 3672 * called when tcp_use_frto() showed green light 3673 * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm 3674 * - tcp_enter_frto_loss() is called if there is not enough evidence 3675 * to prove that the RTO is indeed spurious. 
It transfers the control 3676 * from F-RTO to the conventional RTO recovery 3677 */ 3678 static bool tcp_process_frto(struct sock *sk, int flag) 3679 { 3680 struct tcp_sock *tp = tcp_sk(sk); 3681 3682 tcp_verify_left_out(tp); 3683 3684 /* Duplicate the behavior from Loss state (fastretrans_alert) */ 3685 if (flag & FLAG_DATA_ACKED) 3686 inet_csk(sk)->icsk_retransmits = 0; 3687 3688 if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || 3689 ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) 3690 tp->undo_marker = 0; 3691 3692 if (!before(tp->snd_una, tp->frto_highmark)) { 3693 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); 3694 return true; 3695 } 3696 3697 if (!tcp_is_sackfrto(tp)) { 3698 /* RFC4138 shortcoming in step 2; should also have case c): 3699 * ACK isn't duplicate nor advances window, e.g., opposite dir 3700 * data, winupdate 3701 */ 3702 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) 3703 return true; 3704 3705 if (!(flag & FLAG_DATA_ACKED)) { 3706 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), 3707 flag); 3708 return true; 3709 } 3710 } else { 3711 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3712 /* Prevent sending of new data. */ 3713 tp->snd_cwnd = min(tp->snd_cwnd, 3714 tcp_packets_in_flight(tp)); 3715 return true; 3716 } 3717 3718 if ((tp->frto_counter >= 2) && 3719 (!(flag & FLAG_FORWARD_PROGRESS) || 3720 ((flag & FLAG_DATA_SACKED) && 3721 !(flag & FLAG_ONLY_ORIG_SACKED)))) { 3722 /* RFC4138 shortcoming (see comment above) */ 3723 if (!(flag & FLAG_FORWARD_PROGRESS) && 3724 (flag & FLAG_NOT_DUP)) 3725 return true; 3726 3727 tcp_enter_frto_loss(sk, 3, flag); 3728 return true; 3729 } 3730 } 3731 3732 if (tp->frto_counter == 1) { 3733 /* tcp_may_send_now needs to see updated state */ 3734 tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; 3735 tp->frto_counter = 2; 3736 3737 if (!tcp_may_send_now(sk)) 3738 tcp_enter_frto_loss(sk, 2, flag); 3739 3740 return true; 3741 } else { 3742 switch (sysctl_tcp_frto_response) { 3743 case 2: 3744 tcp_undo_spur_to_response(sk, flag); 3745 break; 3746 case 1: 3747 tcp_conservative_spur_to_response(tp); 3748 break; 3749 default: 3750 tcp_ratehalving_spur_to_response(sk); 3751 break; 3752 } 3753 tp->frto_counter = 0; 3754 tp->undo_marker = 0; 3755 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); 3756 } 3757 return false; 3758 } 3759 3760 /* This routine deals with incoming acks, but not outgoing ones. */ 3761 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3762 { 3763 struct inet_connection_sock *icsk = inet_csk(sk); 3764 struct tcp_sock *tp = tcp_sk(sk); 3765 u32 prior_snd_una = tp->snd_una; 3766 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3767 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3768 bool is_dupack = false; 3769 u32 prior_in_flight; 3770 u32 prior_fackets; 3771 int prior_packets; 3772 int prior_sacked = tp->sacked_out; 3773 int pkts_acked = 0; 3774 int newly_acked_sacked = 0; 3775 bool frto_cwnd = false; 3776 3777 /* If the ack is older than previous acks 3778 * then we can probably ignore it. 3779 */ 3780 if (before(ack, prior_snd_una)) 3781 goto old_ack; 3782 3783 /* If the ack includes data we haven't sent yet, discard 3784 * this segment (RFC793 Section 3.9). 
3785 */ 3786 if (after(ack, tp->snd_nxt)) 3787 goto invalid_ack; 3788 3789 if (tp->early_retrans_delayed) 3790 tcp_rearm_rto(sk); 3791 3792 if (after(ack, prior_snd_una)) 3793 flag |= FLAG_SND_UNA_ADVANCED; 3794 3795 if (sysctl_tcp_abc) { 3796 if (icsk->icsk_ca_state < TCP_CA_CWR) 3797 tp->bytes_acked += ack - prior_snd_una; 3798 else if (icsk->icsk_ca_state == TCP_CA_Loss) 3799 /* we assume just one segment left network */ 3800 tp->bytes_acked += min(ack - prior_snd_una, 3801 tp->mss_cache); 3802 } 3803 3804 prior_fackets = tp->fackets_out; 3805 prior_in_flight = tcp_packets_in_flight(tp); 3806 3807 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3808 /* Window is constant, pure forward advance. 3809 * No more checks are required. 3810 * Note, we use the fact that SND.UNA>=SND.WL2. 3811 */ 3812 tcp_update_wl(tp, ack_seq); 3813 tp->snd_una = ack; 3814 flag |= FLAG_WIN_UPDATE; 3815 3816 tcp_ca_event(sk, CA_EVENT_FAST_ACK); 3817 3818 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); 3819 } else { 3820 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3821 flag |= FLAG_DATA; 3822 else 3823 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3824 3825 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3826 3827 if (TCP_SKB_CB(skb)->sacked) 3828 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3829 3830 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) 3831 flag |= FLAG_ECE; 3832 3833 tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 3834 } 3835 3836 /* We passed data and got it acked, remove any soft error 3837 * log. Something worked... 3838 */ 3839 sk->sk_err_soft = 0; 3840 icsk->icsk_probes_out = 0; 3841 tp->rcv_tstamp = tcp_time_stamp; 3842 prior_packets = tp->packets_out; 3843 if (!prior_packets) 3844 goto no_queue; 3845 3846 /* See if we can take anything off of the retransmit queue. */ 3847 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3848 3849 pkts_acked = prior_packets - tp->packets_out; 3850 newly_acked_sacked = (prior_packets - prior_sacked) - 3851 (tp->packets_out - tp->sacked_out); 3852 3853 if (tp->frto_counter) 3854 frto_cwnd = tcp_process_frto(sk, flag); 3855 /* Guarantee sacktag reordering detection against wrap-arounds */ 3856 if (before(tp->frto_highmark, tp->snd_una)) 3857 tp->frto_highmark = 0; 3858 3859 if (tcp_ack_is_dubious(sk, flag)) { 3860 /* Advance CWND, if state allows this. */ 3861 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && 3862 tcp_may_raise_cwnd(sk, flag)) 3863 tcp_cong_avoid(sk, ack, prior_in_flight); 3864 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3865 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3866 is_dupack, flag); 3867 } else { 3868 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3869 tcp_cong_avoid(sk, ack, prior_in_flight); 3870 } 3871 3872 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3873 dst_confirm(__sk_dst_get(sk)); 3874 3875 return 1; 3876 3877 no_queue: 3878 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3879 if (flag & FLAG_DSACKING_ACK) 3880 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3881 is_dupack, flag); 3882 /* If this ack opens up a zero window, clear backoff. It was 3883 * being used to time the probes, and is probably far higher than 3884 * it needs to be for normal retransmission. 
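 *
 * tcp_ack_probe() below does the work: if the window now has room
 * for the head of the write queue, it clears icsk_backoff and stops
 * the zero window probe timer; otherwise it re-arms the probe timer
 * with the backed-off timeout min(icsk_rto << icsk_backoff, TCP_RTO_MAX).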
3885 */ 3886 if (tcp_send_head(sk)) 3887 tcp_ack_probe(sk); 3888 return 1; 3889 3890 invalid_ack: 3891 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3892 return -1; 3893 3894 old_ack: 3895 /* If data was SACKed, tag it and see if we should send more data. 3896 * If data was DSACKed, see if we can undo a cwnd reduction. 3897 */ 3898 if (TCP_SKB_CB(skb)->sacked) { 3899 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3900 newly_acked_sacked = tp->sacked_out - prior_sacked; 3901 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3902 is_dupack, flag); 3903 } 3904 3905 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3906 return 0; 3907 } 3908 3909 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3910 * But, this can also be called on packets in the established flow when 3911 * the fast version below fails. 3912 */ 3913 void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, 3914 const u8 **hvpp, int estab) 3915 { 3916 const unsigned char *ptr; 3917 const struct tcphdr *th = tcp_hdr(skb); 3918 int length = (th->doff * 4) - sizeof(struct tcphdr); 3919 3920 ptr = (const unsigned char *)(th + 1); 3921 opt_rx->saw_tstamp = 0; 3922 3923 while (length > 0) { 3924 int opcode = *ptr++; 3925 int opsize; 3926 3927 switch (opcode) { 3928 case TCPOPT_EOL: 3929 return; 3930 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3931 length--; 3932 continue; 3933 default: 3934 opsize = *ptr++; 3935 if (opsize < 2) /* "silly options" */ 3936 return; 3937 if (opsize > length) 3938 return; /* don't parse partial options */ 3939 switch (opcode) { 3940 case TCPOPT_MSS: 3941 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3942 u16 in_mss = get_unaligned_be16(ptr); 3943 if (in_mss) { 3944 if (opt_rx->user_mss && 3945 opt_rx->user_mss < in_mss) 3946 in_mss = opt_rx->user_mss; 3947 opt_rx->mss_clamp = in_mss; 3948 } 3949 } 3950 break; 3951 case TCPOPT_WINDOW: 3952 if (opsize == TCPOLEN_WINDOW && th->syn && 3953 !estab && sysctl_tcp_window_scaling) { 3954 __u8 snd_wscale = *(__u8 *)ptr; 3955 opt_rx->wscale_ok = 1; 3956 if (snd_wscale > 14) { 3957 net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", 3958 __func__, 3959 snd_wscale); 3960 snd_wscale = 14; 3961 } 3962 opt_rx->snd_wscale = snd_wscale; 3963 } 3964 break; 3965 case TCPOPT_TIMESTAMP: 3966 if ((opsize == TCPOLEN_TIMESTAMP) && 3967 ((estab && opt_rx->tstamp_ok) || 3968 (!estab && sysctl_tcp_timestamps))) { 3969 opt_rx->saw_tstamp = 1; 3970 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3971 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3972 } 3973 break; 3974 case TCPOPT_SACK_PERM: 3975 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3976 !estab && sysctl_tcp_sack) { 3977 opt_rx->sack_ok = TCP_SACK_SEEN; 3978 tcp_sack_reset(opt_rx); 3979 } 3980 break; 3981 3982 case TCPOPT_SACK: 3983 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3984 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3985 opt_rx->sack_ok) { 3986 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3987 } 3988 break; 3989 #ifdef CONFIG_TCP_MD5SIG 3990 case TCPOPT_MD5SIG: 3991 /* 3992 * The MD5 Hash has already been 3993 * checked (see tcp_v{4,6}_do_rcv()). 3994 */ 3995 break; 3996 #endif 3997 case TCPOPT_COOKIE: 3998 /* This option is variable length. 
3999 */ 4000 switch (opsize) { 4001 case TCPOLEN_COOKIE_BASE: 4002 /* not yet implemented */ 4003 break; 4004 case TCPOLEN_COOKIE_PAIR: 4005 /* not yet implemented */ 4006 break; 4007 case TCPOLEN_COOKIE_MIN+0: 4008 case TCPOLEN_COOKIE_MIN+2: 4009 case TCPOLEN_COOKIE_MIN+4: 4010 case TCPOLEN_COOKIE_MIN+6: 4011 case TCPOLEN_COOKIE_MAX: 4012 /* 16-bit multiple */ 4013 opt_rx->cookie_plus = opsize; 4014 *hvpp = ptr; 4015 break; 4016 default: 4017 /* ignore option */ 4018 break; 4019 } 4020 break; 4021 } 4022 4023 ptr += opsize-2; 4024 length -= opsize; 4025 } 4026 } 4027 } 4028 EXPORT_SYMBOL(tcp_parse_options); 4029 4030 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 4031 { 4032 const __be32 *ptr = (const __be32 *)(th + 1); 4033 4034 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4035 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 4036 tp->rx_opt.saw_tstamp = 1; 4037 ++ptr; 4038 tp->rx_opt.rcv_tsval = ntohl(*ptr); 4039 ++ptr; 4040 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 4041 return true; 4042 } 4043 return false; 4044 } 4045 4046 /* Fast parse options. This hopes to only see timestamps. 4047 * If it is wrong it falls back on tcp_parse_options(). 4048 */ 4049 static bool tcp_fast_parse_options(const struct sk_buff *skb, 4050 const struct tcphdr *th, 4051 struct tcp_sock *tp, const u8 **hvpp) 4052 { 4053 /* In the spirit of fast parsing, compare doff directly to constant 4054 * values. Because equality is used, short doff can be ignored here. 4055 */ 4056 if (th->doff == (sizeof(*th) / 4)) { 4057 tp->rx_opt.saw_tstamp = 0; 4058 return false; 4059 } else if (tp->rx_opt.tstamp_ok && 4060 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 4061 if (tcp_parse_aligned_timestamp(tp, th)) 4062 return true; 4063 } 4064 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); 4065 return true; 4066 } 4067 4068 #ifdef CONFIG_TCP_MD5SIG 4069 /* 4070 * Parse MD5 Signature option 4071 */ 4072 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 4073 { 4074 int length = (th->doff << 2) - sizeof(*th); 4075 const u8 *ptr = (const u8 *)(th + 1); 4076 4077 /* If the TCP option is too short, we can short cut */ 4078 if (length < TCPOLEN_MD5SIG) 4079 return NULL; 4080 4081 while (length > 0) { 4082 int opcode = *ptr++; 4083 int opsize; 4084 4085 switch(opcode) { 4086 case TCPOPT_EOL: 4087 return NULL; 4088 case TCPOPT_NOP: 4089 length--; 4090 continue; 4091 default: 4092 opsize = *ptr++; 4093 if (opsize < 2 || opsize > length) 4094 return NULL; 4095 if (opcode == TCPOPT_MD5SIG) 4096 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 4097 } 4098 ptr += opsize - 2; 4099 length -= opsize; 4100 } 4101 return NULL; 4102 } 4103 EXPORT_SYMBOL(tcp_parse_md5sig_option); 4104 #endif 4105 4106 static inline void tcp_store_ts_recent(struct tcp_sock *tp) 4107 { 4108 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 4109 tp->rx_opt.ts_recent_stamp = get_seconds(); 4110 } 4111 4112 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 4113 { 4114 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 4115 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 4116 * extra check below makes sure this can only happen 4117 * for pure ACK frames. -DaveM 4118 * 4119 * Not only, also it occurs for expired timestamps. 4120 */ 4121 4122 if (tcp_paws_check(&tp->rx_opt, 0)) 4123 tcp_store_ts_recent(tp); 4124 } 4125 } 4126 4127 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 4128 * 4129 * It is not fatal. 
If this ACK does _not_ change critical state (seqs, window) 4130 * it can pass through stack. So, the following predicate verifies that 4131 * this segment is not used for anything but congestion avoidance or 4132 * fast retransmit. Moreover, we even are able to eliminate most of such 4133 * second order effects, if we apply some small "replay" window (~RTO) 4134 * to timestamp space. 4135 * 4136 * All these measures still do not guarantee that we reject wrapped ACKs 4137 * on networks with high bandwidth, when sequence space is recycled fastly, 4138 * but it guarantees that such events will be very rare and do not affect 4139 * connection seriously. This doesn't look nice, but alas, PAWS is really 4140 * buggy extension. 4141 * 4142 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 4143 * states that events when retransmit arrives after original data are rare. 4144 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 4145 * the biggest problem on large power networks even with minor reordering. 4146 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 4147 * up to bandwidth of 18Gigabit/sec. 8) ] 4148 */ 4149 4150 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 4151 { 4152 const struct tcp_sock *tp = tcp_sk(sk); 4153 const struct tcphdr *th = tcp_hdr(skb); 4154 u32 seq = TCP_SKB_CB(skb)->seq; 4155 u32 ack = TCP_SKB_CB(skb)->ack_seq; 4156 4157 return (/* 1. Pure ACK with correct sequence number. */ 4158 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 4159 4160 /* 2. ... and duplicate ACK. */ 4161 ack == tp->snd_una && 4162 4163 /* 3. ... and does not update window. */ 4164 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 4165 4166 /* 4. ... and sits in replay window. */ 4167 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 4168 } 4169 4170 static inline int tcp_paws_discard(const struct sock *sk, 4171 const struct sk_buff *skb) 4172 { 4173 const struct tcp_sock *tp = tcp_sk(sk); 4174 4175 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && 4176 !tcp_disordered_ack(sk, skb); 4177 } 4178 4179 /* Check segment sequence number for validity. 4180 * 4181 * Segment controls are considered valid, if the segment 4182 * fits to the window after truncation to the window. Acceptability 4183 * of data (and SYN, FIN, of course) is checked separately. 4184 * See tcp_data_queue(), for example. 4185 * 4186 * Also, controls (RST is main one) are accepted using RCV.WUP instead 4187 * of RCV.NXT. Peer still did not advance his SND.UNA when we 4188 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 4189 * (borrowed from freebsd) 4190 */ 4191 4192 static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 4193 { 4194 return !before(end_seq, tp->rcv_wup) && 4195 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 4196 } 4197 4198 /* When we get a reset we do this. */ 4199 static void tcp_reset(struct sock *sk) 4200 { 4201 /* We want the right error as BSD sees it (and indeed as we do). 
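 * A RST in SYN_SENT means nobody was listening, hence ECONNREFUSED.
 * In CLOSE_WAIT the peer has already sent its FIN, so further writes
 * fail with EPIPE. Everything else maps to the generic ECONNRESET.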
*/ 4202 switch (sk->sk_state) { 4203 case TCP_SYN_SENT: 4204 sk->sk_err = ECONNREFUSED; 4205 break; 4206 case TCP_CLOSE_WAIT: 4207 sk->sk_err = EPIPE; 4208 break; 4209 case TCP_CLOSE: 4210 return; 4211 default: 4212 sk->sk_err = ECONNRESET; 4213 } 4214 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4215 smp_wmb(); 4216 4217 if (!sock_flag(sk, SOCK_DEAD)) 4218 sk->sk_error_report(sk); 4219 4220 tcp_done(sk); 4221 } 4222 4223 /* 4224 * Process the FIN bit. This now behaves as it is supposed to work 4225 * and the FIN takes effect when it is validly part of sequence 4226 * space. Not before when we get holes. 4227 * 4228 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 4229 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 4230 * TIME-WAIT) 4231 * 4232 * If we are in FINWAIT-1, a received FIN indicates simultaneous 4233 * close and we go into CLOSING (and later onto TIME-WAIT) 4234 * 4235 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4236 */ 4237 static void tcp_fin(struct sock *sk) 4238 { 4239 struct tcp_sock *tp = tcp_sk(sk); 4240 4241 inet_csk_schedule_ack(sk); 4242 4243 sk->sk_shutdown |= RCV_SHUTDOWN; 4244 sock_set_flag(sk, SOCK_DONE); 4245 4246 switch (sk->sk_state) { 4247 case TCP_SYN_RECV: 4248 case TCP_ESTABLISHED: 4249 /* Move to CLOSE_WAIT */ 4250 tcp_set_state(sk, TCP_CLOSE_WAIT); 4251 inet_csk(sk)->icsk_ack.pingpong = 1; 4252 break; 4253 4254 case TCP_CLOSE_WAIT: 4255 case TCP_CLOSING: 4256 /* Received a retransmission of the FIN, do 4257 * nothing. 4258 */ 4259 break; 4260 case TCP_LAST_ACK: 4261 /* RFC793: Remain in the LAST-ACK state. */ 4262 break; 4263 4264 case TCP_FIN_WAIT1: 4265 /* This case occurs when a simultaneous close 4266 * happens, we must ack the received FIN and 4267 * enter the CLOSING state. 4268 */ 4269 tcp_send_ack(sk); 4270 tcp_set_state(sk, TCP_CLOSING); 4271 break; 4272 case TCP_FIN_WAIT2: 4273 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 4274 tcp_send_ack(sk); 4275 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 4276 break; 4277 default: 4278 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4279 * cases we should never reach this piece of code. 4280 */ 4281 pr_err("%s: Impossible, sk->sk_state=%d\n", 4282 __func__, sk->sk_state); 4283 break; 4284 } 4285 4286 /* It _is_ possible, that we have something out-of-order _after_ FIN. 4287 * Probably, we should reset in this case. For now drop them. 4288 */ 4289 __skb_queue_purge(&tp->out_of_order_queue); 4290 if (tcp_is_sack(tp)) 4291 tcp_sack_reset(&tp->rx_opt); 4292 sk_mem_reclaim(sk); 4293 4294 if (!sock_flag(sk, SOCK_DEAD)) { 4295 sk->sk_state_change(sk); 4296 4297 /* Do not send POLL_HUP for half duplex close. 
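 *
 * POLL_HUP implies that no further I/O is possible in either
 * direction. After a received FIN only our receive side is shut
 * down and we may still transmit, so unless both directions are
 * closed (SHUTDOWN_MASK) the waiter is only told about readable data.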
*/ 4298 if (sk->sk_shutdown == SHUTDOWN_MASK || 4299 sk->sk_state == TCP_CLOSE) 4300 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 4301 else 4302 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 4303 } 4304 } 4305 4306 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4307 u32 end_seq) 4308 { 4309 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4310 if (before(seq, sp->start_seq)) 4311 sp->start_seq = seq; 4312 if (after(end_seq, sp->end_seq)) 4313 sp->end_seq = end_seq; 4314 return true; 4315 } 4316 return false; 4317 } 4318 4319 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4320 { 4321 struct tcp_sock *tp = tcp_sk(sk); 4322 4323 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 4324 int mib_idx; 4325 4326 if (before(seq, tp->rcv_nxt)) 4327 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 4328 else 4329 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 4330 4331 NET_INC_STATS_BH(sock_net(sk), mib_idx); 4332 4333 tp->rx_opt.dsack = 1; 4334 tp->duplicate_sack[0].start_seq = seq; 4335 tp->duplicate_sack[0].end_seq = end_seq; 4336 } 4337 } 4338 4339 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 4340 { 4341 struct tcp_sock *tp = tcp_sk(sk); 4342 4343 if (!tp->rx_opt.dsack) 4344 tcp_dsack_set(sk, seq, end_seq); 4345 else 4346 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4347 } 4348 4349 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4350 { 4351 struct tcp_sock *tp = tcp_sk(sk); 4352 4353 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4354 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4355 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4356 tcp_enter_quickack_mode(sk); 4357 4358 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 4359 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4360 4361 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4362 end_seq = tp->rcv_nxt; 4363 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 4364 } 4365 } 4366 4367 tcp_send_ack(sk); 4368 } 4369 4370 /* These routines update the SACK block as out-of-order packets arrive or 4371 * in-order packets close up the sequence space. 4372 */ 4373 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 4374 { 4375 int this_sack; 4376 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4377 struct tcp_sack_block *swalk = sp + 1; 4378 4379 /* See if the recent change to the first SACK eats into 4380 * or hits the sequence space of other SACK blocks, if so coalesce. 4381 */ 4382 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 4383 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 4384 int i; 4385 4386 /* Zap SWALK, by moving every further SACK up by one slot. 4387 * Decrease num_sacks. 4388 */ 4389 tp->rx_opt.num_sacks--; 4390 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 4391 sp[i] = sp[i + 1]; 4392 continue; 4393 } 4394 this_sack++, swalk++; 4395 } 4396 } 4397 4398 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 4399 { 4400 struct tcp_sock *tp = tcp_sk(sk); 4401 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4402 int cur_sacks = tp->rx_opt.num_sacks; 4403 int this_sack; 4404 4405 if (!cur_sacks) 4406 goto new_sack; 4407 4408 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 4409 if (tcp_sack_extend(sp, seq, end_seq)) { 4410 /* Rotate this_sack to the first one. 
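 *
 * RFC2018 wants the block holding the most recently received data
 * to be reported first, so an extended block is bubbled to the
 * front of selective_acks[].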
*/ 4411 for (; this_sack > 0; this_sack--, sp--) 4412 swap(*sp, *(sp - 1)); 4413 if (cur_sacks > 1) 4414 tcp_sack_maybe_coalesce(tp); 4415 return; 4416 } 4417 } 4418 4419 /* Could not find an adjacent existing SACK, build a new one, 4420 * put it at the front, and shift everyone else down. We 4421 * always know there is at least one SACK present already here. 4422 * 4423 * If the sack array is full, forget about the last one. 4424 */ 4425 if (this_sack >= TCP_NUM_SACKS) { 4426 this_sack--; 4427 tp->rx_opt.num_sacks--; 4428 sp--; 4429 } 4430 for (; this_sack > 0; this_sack--, sp--) 4431 *sp = *(sp - 1); 4432 4433 new_sack: 4434 /* Build the new head SACK, and we're done. */ 4435 sp->start_seq = seq; 4436 sp->end_seq = end_seq; 4437 tp->rx_opt.num_sacks++; 4438 } 4439 4440 /* RCV.NXT advances, some SACKs should be eaten. */ 4441 4442 static void tcp_sack_remove(struct tcp_sock *tp) 4443 { 4444 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4445 int num_sacks = tp->rx_opt.num_sacks; 4446 int this_sack; 4447 4448 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 4449 if (skb_queue_empty(&tp->out_of_order_queue)) { 4450 tp->rx_opt.num_sacks = 0; 4451 return; 4452 } 4453 4454 for (this_sack = 0; this_sack < num_sacks;) { 4455 /* Check if the start of the sack is covered by RCV.NXT. */ 4456 if (!before(tp->rcv_nxt, sp->start_seq)) { 4457 int i; 4458 4459 /* RCV.NXT must cover all the block! */ 4460 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); 4461 4462 /* Zap this SACK, by moving forward any other SACKS. */ 4463 for (i=this_sack+1; i < num_sacks; i++) 4464 tp->selective_acks[i-1] = tp->selective_acks[i]; 4465 num_sacks--; 4466 continue; 4467 } 4468 this_sack++; 4469 sp++; 4470 } 4471 tp->rx_opt.num_sacks = num_sacks; 4472 } 4473 4474 /* This one checks to see if we can put data from the 4475 * out_of_order queue into the receive_queue. 
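 *
 * We walk the ofo queue in order and stop at the first hole: each
 * skb starting at or below rcv_nxt is either a pure duplicate
 * (freed, after extending the D-SACK range) or is spliced onto
 * sk_receive_queue, advancing rcv_nxt as we go.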
4476 */ 4477 static void tcp_ofo_queue(struct sock *sk) 4478 { 4479 struct tcp_sock *tp = tcp_sk(sk); 4480 __u32 dsack_high = tp->rcv_nxt; 4481 struct sk_buff *skb; 4482 4483 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 4484 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4485 break; 4486 4487 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4488 __u32 dsack = dsack_high; 4489 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4490 dsack_high = TCP_SKB_CB(skb)->end_seq; 4491 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4492 } 4493 4494 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4495 SOCK_DEBUG(sk, "ofo packet was already received\n"); 4496 __skb_unlink(skb, &tp->out_of_order_queue); 4497 __kfree_skb(skb); 4498 continue; 4499 } 4500 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 4501 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4502 TCP_SKB_CB(skb)->end_seq); 4503 4504 __skb_unlink(skb, &tp->out_of_order_queue); 4505 __skb_queue_tail(&sk->sk_receive_queue, skb); 4506 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4507 if (tcp_hdr(skb)->fin) 4508 tcp_fin(sk); 4509 } 4510 } 4511 4512 static bool tcp_prune_ofo_queue(struct sock *sk); 4513 static int tcp_prune_queue(struct sock *sk); 4514 4515 static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) 4516 { 4517 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4518 !sk_rmem_schedule(sk, size)) { 4519 4520 if (tcp_prune_queue(sk) < 0) 4521 return -1; 4522 4523 if (!sk_rmem_schedule(sk, size)) { 4524 if (!tcp_prune_ofo_queue(sk)) 4525 return -1; 4526 4527 if (!sk_rmem_schedule(sk, size)) 4528 return -1; 4529 } 4530 } 4531 return 0; 4532 } 4533 4534 /** 4535 * tcp_try_coalesce - try to merge skb to prior one 4536 * @sk: socket 4537 * @to: prior buffer 4538 * @from: buffer to add in queue 4539 * @fragstolen: pointer to boolean 4540 * 4541 * Before queueing skb @from after @to, try to merge them 4542 * to reduce overall memory use and queue lengths, if cost is small. 4543 * Packets in ofo or receive queues can stay a long time. 4544 * Better try to coalesce them right now to avoid future collapses. 4545 * Returns true if caller should free @from instead of queueing it 4546 */ 4547 static bool tcp_try_coalesce(struct sock *sk, 4548 struct sk_buff *to, 4549 struct sk_buff *from, 4550 bool *fragstolen) 4551 { 4552 int delta; 4553 4554 *fragstolen = false; 4555 4556 if (tcp_hdr(from)->fin) 4557 return false; 4558 4559 /* Its possible this segment overlaps with prior segment in queue */ 4560 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) 4561 return false; 4562 4563 if (!skb_try_coalesce(to, from, fragstolen, &delta)) 4564 return false; 4565 4566 atomic_add(delta, &sk->sk_rmem_alloc); 4567 sk_mem_charge(sk, delta); 4568 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); 4569 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; 4570 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; 4571 return true; 4572 } 4573 4574 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4575 { 4576 struct tcp_sock *tp = tcp_sk(sk); 4577 struct sk_buff *skb1; 4578 u32 seq, end_seq; 4579 4580 TCP_ECN_check_ce(tp, skb); 4581 4582 if (tcp_try_rmem_schedule(sk, skb->truesize)) { 4583 /* TODO: should increment a counter */ 4584 __kfree_skb(skb); 4585 return; 4586 } 4587 4588 /* Disable header prediction. 
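 * An out of order segment means there are holes in the receive
 * space, so force every following segment through the slow path
 * until tcp_fast_path_check() turns prediction back on.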
*/ 4589 tp->pred_flags = 0; 4590 inet_csk_schedule_ack(sk); 4591 4592 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4593 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4594 4595 skb1 = skb_peek_tail(&tp->out_of_order_queue); 4596 if (!skb1) { 4597 /* Initial out of order segment, build 1 SACK. */ 4598 if (tcp_is_sack(tp)) { 4599 tp->rx_opt.num_sacks = 1; 4600 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 4601 tp->selective_acks[0].end_seq = 4602 TCP_SKB_CB(skb)->end_seq; 4603 } 4604 __skb_queue_head(&tp->out_of_order_queue, skb); 4605 goto end; 4606 } 4607 4608 seq = TCP_SKB_CB(skb)->seq; 4609 end_seq = TCP_SKB_CB(skb)->end_seq; 4610 4611 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4612 bool fragstolen; 4613 4614 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { 4615 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4616 } else { 4617 kfree_skb_partial(skb, fragstolen); 4618 skb = NULL; 4619 } 4620 4621 if (!tp->rx_opt.num_sacks || 4622 tp->selective_acks[0].end_seq != seq) 4623 goto add_sack; 4624 4625 /* Common case: data arrive in order after hole. */ 4626 tp->selective_acks[0].end_seq = end_seq; 4627 goto end; 4628 } 4629 4630 /* Find place to insert this segment. */ 4631 while (1) { 4632 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4633 break; 4634 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { 4635 skb1 = NULL; 4636 break; 4637 } 4638 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); 4639 } 4640 4641 /* Do skb overlap to previous one? */ 4642 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4643 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4644 /* All the bits are present. Drop. */ 4645 __kfree_skb(skb); 4646 skb = NULL; 4647 tcp_dsack_set(sk, seq, end_seq); 4648 goto add_sack; 4649 } 4650 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4651 /* Partial overlap. */ 4652 tcp_dsack_set(sk, seq, 4653 TCP_SKB_CB(skb1)->end_seq); 4654 } else { 4655 if (skb_queue_is_first(&tp->out_of_order_queue, 4656 skb1)) 4657 skb1 = NULL; 4658 else 4659 skb1 = skb_queue_prev( 4660 &tp->out_of_order_queue, 4661 skb1); 4662 } 4663 } 4664 if (!skb1) 4665 __skb_queue_head(&tp->out_of_order_queue, skb); 4666 else 4667 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4668 4669 /* And clean segments covered by new one as whole. */ 4670 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { 4671 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); 4672 4673 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4674 break; 4675 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4676 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4677 end_seq); 4678 break; 4679 } 4680 __skb_unlink(skb1, &tp->out_of_order_queue); 4681 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4682 TCP_SKB_CB(skb1)->end_seq); 4683 __kfree_skb(skb1); 4684 } 4685 4686 add_sack: 4687 if (tcp_is_sack(tp)) 4688 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4689 end: 4690 if (skb) 4691 skb_set_owner_r(skb, sk); 4692 } 4693 4694 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4695 bool *fragstolen) 4696 { 4697 int eaten; 4698 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4699 4700 __skb_pull(skb, hdrlen); 4701 eaten = (tail && 4702 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 
1 : 0; 4703 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4704 if (!eaten) { 4705 __skb_queue_tail(&sk->sk_receive_queue, skb); 4706 skb_set_owner_r(skb, sk); 4707 } 4708 return eaten; 4709 } 4710 4711 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4712 { 4713 struct sk_buff *skb; 4714 struct tcphdr *th; 4715 bool fragstolen; 4716 4717 if (tcp_try_rmem_schedule(sk, size + sizeof(*th))) 4718 goto err; 4719 4720 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); 4721 if (!skb) 4722 goto err; 4723 4724 th = (struct tcphdr *)skb_put(skb, sizeof(*th)); 4725 skb_reset_transport_header(skb); 4726 memset(th, 0, sizeof(*th)); 4727 4728 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) 4729 goto err_free; 4730 4731 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 4732 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 4733 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 4734 4735 if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { 4736 WARN_ON_ONCE(fragstolen); /* should not happen */ 4737 __kfree_skb(skb); 4738 } 4739 return size; 4740 4741 err_free: 4742 kfree_skb(skb); 4743 err: 4744 return -ENOMEM; 4745 } 4746 4747 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4748 { 4749 const struct tcphdr *th = tcp_hdr(skb); 4750 struct tcp_sock *tp = tcp_sk(sk); 4751 int eaten = -1; 4752 bool fragstolen = false; 4753 4754 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4755 goto drop; 4756 4757 skb_dst_drop(skb); 4758 __skb_pull(skb, th->doff * 4); 4759 4760 TCP_ECN_accept_cwr(tp, skb); 4761 4762 tp->rx_opt.dsack = 0; 4763 4764 /* Queue data for delivery to the user. 4765 * Packets in sequence go to the receive queue. 4766 * Out of sequence packets to the out_of_order_queue. 4767 */ 4768 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4769 if (tcp_receive_window(tp) == 0) 4770 goto out_of_window; 4771 4772 /* Ok. In sequence. In window. */ 4773 if (tp->ucopy.task == current && 4774 tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 4775 sock_owned_by_user(sk) && !tp->urg_data) { 4776 int chunk = min_t(unsigned int, skb->len, 4777 tp->ucopy.len); 4778 4779 __set_current_state(TASK_RUNNING); 4780 4781 local_bh_enable(); 4782 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4783 tp->ucopy.len -= chunk; 4784 tp->copied_seq += chunk; 4785 eaten = (chunk == skb->len); 4786 tcp_rcv_space_adjust(sk); 4787 } 4788 local_bh_disable(); 4789 } 4790 4791 if (eaten <= 0) { 4792 queue_and_out: 4793 if (eaten < 0 && 4794 tcp_try_rmem_schedule(sk, skb->truesize)) 4795 goto drop; 4796 4797 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4798 } 4799 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4800 if (skb->len) 4801 tcp_event_data_recv(sk, skb); 4802 if (th->fin) 4803 tcp_fin(sk); 4804 4805 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4806 tcp_ofo_queue(sk); 4807 4808 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4809 * gap in queue is filled. 4810 */ 4811 if (skb_queue_empty(&tp->out_of_order_queue)) 4812 inet_csk(sk)->icsk_ack.pingpong = 0; 4813 } 4814 4815 if (tp->rx_opt.num_sacks) 4816 tcp_sack_remove(tp); 4817 4818 tcp_fast_path_check(sk); 4819 4820 if (eaten > 0) 4821 kfree_skb_partial(skb, fragstolen); 4822 else if (!sock_flag(sk, SOCK_DEAD)) 4823 sk->sk_data_ready(sk, 0); 4824 return; 4825 } 4826 4827 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4828 /* A retransmit, 2nd most common case. Force an immediate ack. 
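 * The segment ends at or below rcv_nxt, so the peer retransmitted
 * data we already hold. Ack immediately (and D-SACK the duplicate
 * range below) so the sender can stop retransmitting and, with
 * D-SACK, detect that the retransmission was spurious.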
*/ 4829 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4830 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4831 4832 out_of_window: 4833 tcp_enter_quickack_mode(sk); 4834 inet_csk_schedule_ack(sk); 4835 drop: 4836 __kfree_skb(skb); 4837 return; 4838 } 4839 4840 /* Out of window. F.e. zero window probe. */ 4841 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4842 goto out_of_window; 4843 4844 tcp_enter_quickack_mode(sk); 4845 4846 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4847 /* Partial packet, seq < rcv_next < end_seq */ 4848 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4849 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4850 TCP_SKB_CB(skb)->end_seq); 4851 4852 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4853 4854 /* If window is closed, drop tail of packet. But after 4855 * remembering D-SACK for its head made in previous line. 4856 */ 4857 if (!tcp_receive_window(tp)) 4858 goto out_of_window; 4859 goto queue_and_out; 4860 } 4861 4862 tcp_data_queue_ofo(sk, skb); 4863 } 4864 4865 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4866 struct sk_buff_head *list) 4867 { 4868 struct sk_buff *next = NULL; 4869 4870 if (!skb_queue_is_last(list, skb)) 4871 next = skb_queue_next(list, skb); 4872 4873 __skb_unlink(skb, list); 4874 __kfree_skb(skb); 4875 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4876 4877 return next; 4878 } 4879 4880 /* Collapse contiguous sequence of skbs head..tail with 4881 * sequence numbers start..end. 4882 * 4883 * If tail is NULL, this means until the end of the list. 4884 * 4885 * Segments with FIN/SYN are not collapsed (only because this 4886 * simplifies code) 4887 */ 4888 static void 4889 tcp_collapse(struct sock *sk, struct sk_buff_head *list, 4890 struct sk_buff *head, struct sk_buff *tail, 4891 u32 start, u32 end) 4892 { 4893 struct sk_buff *skb, *n; 4894 bool end_of_skbs; 4895 4896 /* First, check that queue is collapsible and find 4897 * the point where collapsing can be useful. */ 4898 skb = head; 4899 restart: 4900 end_of_skbs = true; 4901 skb_queue_walk_from_safe(list, skb, n) { 4902 if (skb == tail) 4903 break; 4904 /* No new bits? It is possible on ofo queue. */ 4905 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4906 skb = tcp_collapse_one(sk, skb, list); 4907 if (!skb) 4908 break; 4909 goto restart; 4910 } 4911 4912 /* The first skb to collapse is: 4913 * - not SYN/FIN and 4914 * - bloated or contains data before "start" or 4915 * overlaps to the next one. 4916 */ 4917 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4918 (tcp_win_from_space(skb->truesize) > skb->len || 4919 before(TCP_SKB_CB(skb)->seq, start))) { 4920 end_of_skbs = false; 4921 break; 4922 } 4923 4924 if (!skb_queue_is_last(list, skb)) { 4925 struct sk_buff *next = skb_queue_next(list, skb); 4926 if (next != tail && 4927 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { 4928 end_of_skbs = false; 4929 break; 4930 } 4931 } 4932 4933 /* Decided to skip this, advance start seq. */ 4934 start = TCP_SKB_CB(skb)->end_seq; 4935 } 4936 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4937 return; 4938 4939 while (before(start, end)) { 4940 struct sk_buff *nskb; 4941 unsigned int header = skb_headroom(skb); 4942 int copy = SKB_MAX_ORDER(header, 0); 4943 4944 /* Too big header? This can happen with IPv6. 
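 *
 * SKB_MAX_ORDER(header, 0) is the payload room left in an order-0
 * page after the copied headroom and skb overhead, so an oversized
 * headroom (e.g. IPv6 with extension headers) can drive copy negative.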
*/ 4945 if (copy < 0) 4946 return; 4947 if (end - start < copy) 4948 copy = end - start; 4949 nskb = alloc_skb(copy + header, GFP_ATOMIC); 4950 if (!nskb) 4951 return; 4952 4953 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 4954 skb_set_network_header(nskb, (skb_network_header(skb) - 4955 skb->head)); 4956 skb_set_transport_header(nskb, (skb_transport_header(skb) - 4957 skb->head)); 4958 skb_reserve(nskb, header); 4959 memcpy(nskb->head, skb->head, header); 4960 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4961 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4962 __skb_queue_before(list, skb, nskb); 4963 skb_set_owner_r(nskb, sk); 4964 4965 /* Copy data, releasing collapsed skbs. */ 4966 while (copy > 0) { 4967 int offset = start - TCP_SKB_CB(skb)->seq; 4968 int size = TCP_SKB_CB(skb)->end_seq - start; 4969 4970 BUG_ON(offset < 0); 4971 if (size > 0) { 4972 size = min(copy, size); 4973 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 4974 BUG(); 4975 TCP_SKB_CB(nskb)->end_seq += size; 4976 copy -= size; 4977 start += size; 4978 } 4979 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4980 skb = tcp_collapse_one(sk, skb, list); 4981 if (!skb || 4982 skb == tail || 4983 tcp_hdr(skb)->syn || 4984 tcp_hdr(skb)->fin) 4985 return; 4986 } 4987 } 4988 } 4989 } 4990 4991 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 4992 * and tcp_collapse() them until all the queue is collapsed. 4993 */ 4994 static void tcp_collapse_ofo_queue(struct sock *sk) 4995 { 4996 struct tcp_sock *tp = tcp_sk(sk); 4997 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 4998 struct sk_buff *head; 4999 u32 start, end; 5000 5001 if (skb == NULL) 5002 return; 5003 5004 start = TCP_SKB_CB(skb)->seq; 5005 end = TCP_SKB_CB(skb)->end_seq; 5006 head = skb; 5007 5008 for (;;) { 5009 struct sk_buff *next = NULL; 5010 5011 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) 5012 next = skb_queue_next(&tp->out_of_order_queue, skb); 5013 skb = next; 5014 5015 /* Segment is terminated when we see gap or when 5016 * we are at the end of all the queue. */ 5017 if (!skb || 5018 after(TCP_SKB_CB(skb)->seq, end) || 5019 before(TCP_SKB_CB(skb)->end_seq, start)) { 5020 tcp_collapse(sk, &tp->out_of_order_queue, 5021 head, skb, start, end); 5022 head = skb; 5023 if (!skb) 5024 break; 5025 /* Start new segment */ 5026 start = TCP_SKB_CB(skb)->seq; 5027 end = TCP_SKB_CB(skb)->end_seq; 5028 } else { 5029 if (before(TCP_SKB_CB(skb)->seq, start)) 5030 start = TCP_SKB_CB(skb)->seq; 5031 if (after(TCP_SKB_CB(skb)->end_seq, end)) 5032 end = TCP_SKB_CB(skb)->end_seq; 5033 } 5034 } 5035 } 5036 5037 /* 5038 * Purge the out-of-order queue. 5039 * Return true if queue was pruned. 5040 */ 5041 static bool tcp_prune_ofo_queue(struct sock *sk) 5042 { 5043 struct tcp_sock *tp = tcp_sk(sk); 5044 bool res = false; 5045 5046 if (!skb_queue_empty(&tp->out_of_order_queue)) { 5047 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); 5048 __skb_queue_purge(&tp->out_of_order_queue); 5049 5050 /* Reset SACK state. A conforming SACK implementation will 5051 * do the same at a timeout based retransmit. When a connection 5052 * is in a sad state like this, we care only about integrity 5053 * of the connection not performance. 5054 */ 5055 if (tp->rx_opt.sack_ok) 5056 tcp_sack_reset(&tp->rx_opt); 5057 sk_mem_reclaim(sk); 5058 res = true; 5059 } 5060 return res; 5061 } 5062 5063 /* Reduce allocated memory if we can, trying to get 5064 * the socket within its memory limits again. 
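 *
 * The steps escalate: first clamp the advertised window and collapse
 * the receive and ofo queues in place; only if that still leaves us
 * over budget do we purge the ofo queue outright and, as a last
 * resort, tell the caller to drop incoming data.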
5065 * 5066 * Return less than zero if we should start dropping frames 5067 * until the socket owning process reads some of the data 5068 * to stabilize the situation. 5069 */ 5070 static int tcp_prune_queue(struct sock *sk) 5071 { 5072 struct tcp_sock *tp = tcp_sk(sk); 5073 5074 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 5075 5076 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); 5077 5078 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 5079 tcp_clamp_window(sk); 5080 else if (sk_under_memory_pressure(sk)) 5081 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 5082 5083 tcp_collapse_ofo_queue(sk); 5084 if (!skb_queue_empty(&sk->sk_receive_queue)) 5085 tcp_collapse(sk, &sk->sk_receive_queue, 5086 skb_peek(&sk->sk_receive_queue), 5087 NULL, 5088 tp->copied_seq, tp->rcv_nxt); 5089 sk_mem_reclaim(sk); 5090 5091 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5092 return 0; 5093 5094 /* Collapsing did not help, destructive actions follow. 5095 * This must not ever occur. */ 5096 5097 tcp_prune_ofo_queue(sk); 5098 5099 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5100 return 0; 5101 5102 /* If we are really being abused, tell the caller to silently 5103 * drop receive data on the floor. It will get retransmitted 5104 * and hopefully then we'll have sufficient space. 5105 */ 5106 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); 5107 5108 /* Massive buffer overcommit. */ 5109 tp->pred_flags = 0; 5110 return -1; 5111 } 5112 5113 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 5114 * As additional protections, we do not touch cwnd in retransmission phases, 5115 * and if application hit its sndbuf limit recently. 5116 */ 5117 void tcp_cwnd_application_limited(struct sock *sk) 5118 { 5119 struct tcp_sock *tp = tcp_sk(sk); 5120 5121 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 5122 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 5123 /* Limited by application or receiver window. */ 5124 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 5125 u32 win_used = max(tp->snd_cwnd_used, init_win); 5126 if (win_used < tp->snd_cwnd) { 5127 tp->snd_ssthresh = tcp_current_ssthresh(sk); 5128 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 5129 } 5130 tp->snd_cwnd_used = 0; 5131 } 5132 tp->snd_cwnd_stamp = tcp_time_stamp; 5133 } 5134 5135 static bool tcp_should_expand_sndbuf(const struct sock *sk) 5136 { 5137 const struct tcp_sock *tp = tcp_sk(sk); 5138 5139 /* If the user specified a specific send buffer setting, do 5140 * not modify it. 5141 */ 5142 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 5143 return false; 5144 5145 /* If we are under global TCP memory pressure, do not expand. */ 5146 if (sk_under_memory_pressure(sk)) 5147 return false; 5148 5149 /* If we are under soft global TCP memory pressure, do not expand. */ 5150 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 5151 return false; 5152 5153 /* If we filled the congestion window, do not expand. */ 5154 if (tp->packets_out >= tp->snd_cwnd) 5155 return false; 5156 5157 return true; 5158 } 5159 5160 /* When incoming ACK allowed to free some skb from write_queue, 5161 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 5162 * on the exit from tcp input handler. 5163 * 5164 * PROBLEM: sndbuf expansion does not work well with largesend. 
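 *
 * (Presumably because the sizing below assumes one MSS-sized skb
 * per packet in flight, while TSO aggregates many MSS into a single
 * skb, so the per-skb truesize estimate no longer matches reality.)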
5165 */ 5166 static void tcp_new_space(struct sock *sk) 5167 { 5168 struct tcp_sock *tp = tcp_sk(sk); 5169 5170 if (tcp_should_expand_sndbuf(sk)) { 5171 int sndmem = SKB_TRUESIZE(max_t(u32, 5172 tp->rx_opt.mss_clamp, 5173 tp->mss_cache) + 5174 MAX_TCP_HEADER); 5175 int demanded = max_t(unsigned int, tp->snd_cwnd, 5176 tp->reordering + 1); 5177 sndmem *= 2 * demanded; 5178 if (sndmem > sk->sk_sndbuf) 5179 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 5180 tp->snd_cwnd_stamp = tcp_time_stamp; 5181 } 5182 5183 sk->sk_write_space(sk); 5184 } 5185 5186 static void tcp_check_space(struct sock *sk) 5187 { 5188 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 5189 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 5190 if (sk->sk_socket && 5191 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 5192 tcp_new_space(sk); 5193 } 5194 } 5195 5196 static inline void tcp_data_snd_check(struct sock *sk) 5197 { 5198 tcp_push_pending_frames(sk); 5199 tcp_check_space(sk); 5200 } 5201 5202 /* 5203 * Check if sending an ack is needed. 5204 */ 5205 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 5206 { 5207 struct tcp_sock *tp = tcp_sk(sk); 5208 5209 /* More than one full frame received... */ 5210 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && 5211 /* ... and right edge of window advances far enough. 5212 * (tcp_recvmsg() will send ACK otherwise). Or... 5213 */ 5214 __tcp_select_window(sk) >= tp->rcv_wnd) || 5215 /* We ACK each frame or... */ 5216 tcp_in_quickack_mode(sk) || 5217 /* We have out of order data. */ 5218 (ofo_possible && skb_peek(&tp->out_of_order_queue))) { 5219 /* Then ack it now */ 5220 tcp_send_ack(sk); 5221 } else { 5222 /* Else, send delayed ack. */ 5223 tcp_send_delayed_ack(sk); 5224 } 5225 } 5226 5227 static inline void tcp_ack_snd_check(struct sock *sk) 5228 { 5229 if (!inet_csk_ack_scheduled(sk)) { 5230 /* We sent a data segment already. */ 5231 return; 5232 } 5233 __tcp_ack_snd_check(sk, 1); 5234 } 5235 5236 /* 5237 * This routine is only called when we have urgent data 5238 * signaled. Its the 'slow' part of tcp_urg. It could be 5239 * moved inline now as tcp_urg is only called from one 5240 * place. We handle URGent data wrong. We have to - as 5241 * BSD still doesn't use the correction from RFC961. 5242 * For 1003.1g we should support a new option TCP_STDURG to permit 5243 * either form (or just set the sysctl tcp_stdurg). 5244 */ 5245 5246 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) 5247 { 5248 struct tcp_sock *tp = tcp_sk(sk); 5249 u32 ptr = ntohs(th->urg_ptr); 5250 5251 if (ptr && !sysctl_tcp_stdurg) 5252 ptr--; 5253 ptr += ntohl(th->seq); 5254 5255 /* Ignore urgent data that we've already seen and read. */ 5256 if (after(tp->copied_seq, ptr)) 5257 return; 5258 5259 /* Do not replay urg ptr. 5260 * 5261 * NOTE: interesting situation not covered by specs. 5262 * Misbehaving sender may send urg ptr, pointing to segment, 5263 * which we already have in ofo queue. We are not able to fetch 5264 * such data and will stay in TCP_URG_NOTYET until will be eaten 5265 * by recvmsg(). Seems, we are not obliged to handle such wicked 5266 * situations. But it is worth to think about possibility of some 5267 * DoSes using some hypothetical application level deadlock. 5268 */ 5269 if (before(ptr, tp->rcv_nxt)) 5270 return; 5271 5272 /* Do we already have a newer (or duplicate) urgent pointer? */ 5273 if (tp->urg_data && !after(ptr, tp->urg_seq)) 5274 return; 5275 5276 /* Tell the world about our new urgent pointer. 
*/ 5277 sk_send_sigurg(sk); 5278 5279 /* We may be adding urgent data when the last byte read was 5280 * urgent. To do this requires some care. We cannot just ignore 5281 * tp->copied_seq since we would read the last urgent byte again 5282 * as data, nor can we alter copied_seq until this data arrives 5283 * or we break the semantics of SIOCATMARK (and thus sockatmark()) 5284 * 5285 * NOTE. Double Dutch. Rendering to plain English: author of comment 5286 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 5287 * and expect that both A and B disappear from stream. This is _wrong_. 5288 * Though this happens in BSD with high probability, this is occasional. 5289 * Any application relying on this is buggy. Note also, that fix "works" 5290 * only in this artificial test. Insert some normal data between A and B and we will 5291 * decline of BSD again. Verdict: it is better to remove to trap 5292 * buggy users. 5293 */ 5294 if (tp->urg_seq == tp->copied_seq && tp->urg_data && 5295 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 5296 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 5297 tp->copied_seq++; 5298 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 5299 __skb_unlink(skb, &sk->sk_receive_queue); 5300 __kfree_skb(skb); 5301 } 5302 } 5303 5304 tp->urg_data = TCP_URG_NOTYET; 5305 tp->urg_seq = ptr; 5306 5307 /* Disable header prediction. */ 5308 tp->pred_flags = 0; 5309 } 5310 5311 /* This is the 'fast' part of urgent handling. */ 5312 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) 5313 { 5314 struct tcp_sock *tp = tcp_sk(sk); 5315 5316 /* Check if we get a new urgent pointer - normally not. */ 5317 if (th->urg) 5318 tcp_check_urg(sk, th); 5319 5320 /* Do we wait for any urgent data? - normally not... */ 5321 if (tp->urg_data == TCP_URG_NOTYET) { 5322 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 5323 th->syn; 5324 5325 /* Is the urgent pointer pointing into this packet? 
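 *
 * ptr above is the offset of the urgent byte within this skb: the
 * distance from the segment's starting sequence to urg_seq, plus
 * the header length (skb->data still covers the TCP header here),
 * minus one if a SYN occupies the first sequence number.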
*/ 5326 if (ptr < skb->len) { 5327 u8 tmp; 5328 if (skb_copy_bits(skb, ptr, &tmp, 1)) 5329 BUG(); 5330 tp->urg_data = TCP_URG_VALID | tmp; 5331 if (!sock_flag(sk, SOCK_DEAD)) 5332 sk->sk_data_ready(sk, 0); 5333 } 5334 } 5335 } 5336 5337 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 5338 { 5339 struct tcp_sock *tp = tcp_sk(sk); 5340 int chunk = skb->len - hlen; 5341 int err; 5342 5343 local_bh_enable(); 5344 if (skb_csum_unnecessary(skb)) 5345 err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); 5346 else 5347 err = skb_copy_and_csum_datagram_iovec(skb, hlen, 5348 tp->ucopy.iov); 5349 5350 if (!err) { 5351 tp->ucopy.len -= chunk; 5352 tp->copied_seq += chunk; 5353 tcp_rcv_space_adjust(sk); 5354 } 5355 5356 local_bh_disable(); 5357 return err; 5358 } 5359 5360 static __sum16 __tcp_checksum_complete_user(struct sock *sk, 5361 struct sk_buff *skb) 5362 { 5363 __sum16 result; 5364 5365 if (sock_owned_by_user(sk)) { 5366 local_bh_enable(); 5367 result = __tcp_checksum_complete(skb); 5368 local_bh_disable(); 5369 } else { 5370 result = __tcp_checksum_complete(skb); 5371 } 5372 return result; 5373 } 5374 5375 static inline int tcp_checksum_complete_user(struct sock *sk, 5376 struct sk_buff *skb) 5377 { 5378 return !skb_csum_unnecessary(skb) && 5379 __tcp_checksum_complete_user(sk, skb); 5380 } 5381 5382 #ifdef CONFIG_NET_DMA 5383 static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, 5384 int hlen) 5385 { 5386 struct tcp_sock *tp = tcp_sk(sk); 5387 int chunk = skb->len - hlen; 5388 int dma_cookie; 5389 bool copied_early = false; 5390 5391 if (tp->ucopy.wakeup) 5392 return false; 5393 5394 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 5395 tp->ucopy.dma_chan = net_dma_find_channel(); 5396 5397 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 5398 5399 dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, 5400 skb, hlen, 5401 tp->ucopy.iov, chunk, 5402 tp->ucopy.pinned_list); 5403 5404 if (dma_cookie < 0) 5405 goto out; 5406 5407 tp->ucopy.dma_cookie = dma_cookie; 5408 copied_early = true; 5409 5410 tp->ucopy.len -= chunk; 5411 tp->copied_seq += chunk; 5412 tcp_rcv_space_adjust(sk); 5413 5414 if ((tp->ucopy.len == 0) || 5415 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 5416 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 5417 tp->ucopy.wakeup = 1; 5418 sk->sk_data_ready(sk, 0); 5419 } 5420 } else if (chunk > 0) { 5421 tp->ucopy.wakeup = 1; 5422 sk->sk_data_ready(sk, 0); 5423 } 5424 out: 5425 return copied_early; 5426 } 5427 #endif /* CONFIG_NET_DMA */ 5428 5429 /* Does PAWS and seqno based validation of an incoming segment, flags will 5430 * play significant role here. 5431 */ 5432 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5433 const struct tcphdr *th, int syn_inerr) 5434 { 5435 const u8 *hash_location; 5436 struct tcp_sock *tp = tcp_sk(sk); 5437 5438 /* RFC1323: H1. Apply PAWS check first. */ 5439 if (tcp_fast_parse_options(skb, th, tp, &hash_location) && 5440 tp->rx_opt.saw_tstamp && 5441 tcp_paws_discard(sk, skb)) { 5442 if (!th->rst) { 5443 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5444 tcp_send_dupack(sk, skb); 5445 goto discard; 5446 } 5447 /* Reset is accepted even if it did not pass PAWS. */ 5448 } 5449 5450 /* Step 1: check sequence number */ 5451 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 5452 /* RFC793, page 37: "In all states except SYN-SENT, all reset 5453 * (RST) segments are validated by checking their SEQ-fields." 
5454 * And page 69: "If an incoming segment is not acceptable, 5455 * an acknowledgment should be sent in reply (unless the RST 5456 * bit is set, if so drop the segment and return)". 5457 */ 5458 if (!th->rst) 5459 tcp_send_dupack(sk, skb); 5460 goto discard; 5461 } 5462 5463 /* Step 2: check RST bit */ 5464 if (th->rst) { 5465 tcp_reset(sk); 5466 goto discard; 5467 } 5468 5469 /* ts_recent update must be made after we are sure that the packet 5470 * is in window. 5471 */ 5472 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 5473 5474 /* step 3: check security and precedence [ignored] */ 5475 5476 /* step 4: Check for a SYN in window. */ 5477 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5478 if (syn_inerr) 5479 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5480 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); 5481 tcp_reset(sk); 5482 return -1; 5483 } 5484 5485 return 1; 5486 5487 discard: 5488 __kfree_skb(skb); 5489 return 0; 5490 } 5491 5492 /* 5493 * TCP receive function for the ESTABLISHED state. 5494 * 5495 * It is split into a fast path and a slow path. The fast path is 5496 * disabled when: 5497 * - A zero window was announced from us - zero window probing 5498 * is only handled properly in the slow path. 5499 * - Out of order segments arrived. 5500 * - Urgent data is expected. 5501 * - There is no buffer space left 5502 * - Unexpected TCP flags/window values/header lengths are received 5503 * (detected by checking the TCP header against pred_flags) 5504 * - Data is sent in both directions. Fast path only supports pure senders 5505 * or pure receivers (this means either the sequence number or the ack 5506 * value must stay constant) 5507 * - Unexpected TCP option. 5508 * 5509 * When these conditions are not satisfied it drops into a standard 5510 * receive procedure patterned after RFC793 to handle all cases. 5511 * The first three cases are guaranteed by proper pred_flags setting, 5512 * the rest is checked inline. Fast processing is turned on in 5513 * tcp_data_queue when everything is OK. 5514 */ 5515 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5516 const struct tcphdr *th, unsigned int len) 5517 { 5518 struct tcp_sock *tp = tcp_sk(sk); 5519 int res; 5520 5521 /* 5522 * Header prediction. 5523 * The code loosely follows the one in the famous 5524 * "30 instruction TCP receive" Van Jacobson mail. 5525 * 5526 * Van's trick is to deposit buffers into socket queue 5527 * on a device interrupt, to call tcp_recv function 5528 * on the receive process context and checksum and copy 5529 * the buffer to user space. smart... 5530 * 5531 * Our current scheme is not silly either but we take the 5532 * extra cost of the net_bh soft interrupt processing... 5533 * We do checksum and copy also but from device to kernel. 5534 */ 5535 5536 tp->rx_opt.saw_tstamp = 0; 5537 5538 /* pred_flags is 0xS?10 << 16 + snd_wnd 5539 * if header_prediction is to be made 5540 * 'S' will always be tp->tcp_header_len >> 2 5541 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 5542 * turn it off (when there are holes in the receive 5543 * space for instance) 5544 * PSH flag is ignored. 5545 */ 5546 5547 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 5548 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && 5549 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 5550 int tcp_header_len = tp->tcp_header_len; 5551 5552 /* Timestamp header prediction: tcp_header_len 5553 * is automatically equal to th->doff*4 due to pred_flags 5554 * match. 
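 *
 * (As a sketch of why that holds: assuming the fast-path helper in
 * include/net/tcp.h keeps its usual shape, the predictor is primed
 * roughly as
 *
 *	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 *			       ntohl(TCP_FLAG_ACK) | snd_wnd);
 *
 * so an incoming header can only match if it carries exactly the ACK
 * flag, the expected data offset and an unchanged window field.)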
5555 */
5556
5557 /* Check timestamp */
5558 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
5559 /* No? Slow path! */
5560 if (!tcp_parse_aligned_timestamp(tp, th))
5561 goto slow_path;
5562
5563 /* If PAWS failed, check it more carefully in slow path */
5564 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
5565 goto slow_path;
5566
5567 /* DO NOT update ts_recent here: if the checksum fails
5568 * and the timestamp was the corrupted part, doing so
5569 * would hang the connection, since we would drop all
5570 * future packets due to the PAWS test.
5571 */
5572 }
5573
5574 if (len <= tcp_header_len) {
5575 /* Bulk data transfer: sender */
5576 if (len == tcp_header_len) {
5577 /* Predicted packet is in window by definition.
5578 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5579 * Hence, check seq<=rcv_wup reduces to:
5580 */
5581 if (tcp_header_len ==
5582 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5583 tp->rcv_nxt == tp->rcv_wup)
5584 tcp_store_ts_recent(tp);
5585
5586 /* We know that such packets are checksummed
5587 * on entry.
5588 */
5589 tcp_ack(sk, skb, 0);
5590 __kfree_skb(skb);
5591 tcp_data_snd_check(sk);
5592 return 0;
5593 } else { /* Header too small */
5594 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
5595 goto discard;
5596 }
5597 } else {
5598 int eaten = 0;
5599 int copied_early = 0;
5600 bool fragstolen = false;
5601
5602 if (tp->copied_seq == tp->rcv_nxt &&
5603 len - tcp_header_len <= tp->ucopy.len) {
5604 #ifdef CONFIG_NET_DMA
5605 if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
5606 copied_early = 1;
5607 eaten = 1;
5608 }
5609 #endif
5610 if (tp->ucopy.task == current &&
5611 sock_owned_by_user(sk) && !copied_early) {
5612 __set_current_state(TASK_RUNNING);
5613
5614 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
5615 eaten = 1;
5616 }
5617 if (eaten) {
5618 /* Predicted packet is in window by definition.
5619 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5620 * Hence, check seq<=rcv_wup reduces to:
5621 */
5622 if (tcp_header_len ==
5623 (sizeof(struct tcphdr) +
5624 TCPOLEN_TSTAMP_ALIGNED) &&
5625 tp->rcv_nxt == tp->rcv_wup)
5626 tcp_store_ts_recent(tp);
5627
5628 tcp_rcv_rtt_measure_ts(sk, skb);
5629
5630 __skb_pull(skb, tcp_header_len);
5631 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5632 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
5633 }
5634 if (copied_early)
5635 tcp_cleanup_rbuf(sk, skb->len);
5636 }
5637 if (!eaten) {
5638 if (tcp_checksum_complete_user(sk, skb))
5639 goto csum_error;
5640
5641 /* Predicted packet is in window by definition.
5642 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5643 * Hence, check seq<=rcv_wup reduces to:
5644 */
5645 if (tcp_header_len ==
5646 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5647 tp->rcv_nxt == tp->rcv_wup)
5648 tcp_store_ts_recent(tp);
5649
5650 tcp_rcv_rtt_measure_ts(sk, skb);
5651
5652 if ((int)skb->truesize > sk->sk_forward_alloc)
5653 goto step5;
5654
5655 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
5656
5657 /* Bulk data transfer: receiver */
5658 eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
5659 &fragstolen);
5660 }
5661
5662 tcp_event_data_recv(sk, skb);
5663
5664 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
5665 /* Well, only one small jumplet in fast path...
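 * If the ACK field moved (ack_seq != snd_una), tcp_ack() processes
 * it and tcp_data_snd_check() may then transmit more data; a segment
 * with an unchanged ACK field skips the ACK machinery entirely.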
*/ 5666 tcp_ack(sk, skb, FLAG_DATA); 5667 tcp_data_snd_check(sk); 5668 if (!inet_csk_ack_scheduled(sk)) 5669 goto no_ack; 5670 } 5671 5672 if (!copied_early || tp->rcv_nxt != tp->rcv_wup) 5673 __tcp_ack_snd_check(sk, 0); 5674 no_ack: 5675 #ifdef CONFIG_NET_DMA 5676 if (copied_early) 5677 __skb_queue_tail(&sk->sk_async_wait_queue, skb); 5678 else 5679 #endif 5680 if (eaten) 5681 kfree_skb_partial(skb, fragstolen); 5682 else 5683 sk->sk_data_ready(sk, 0); 5684 return 0; 5685 } 5686 } 5687 5688 slow_path: 5689 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5690 goto csum_error; 5691 5692 /* 5693 * Standard slow path. 5694 */ 5695 5696 res = tcp_validate_incoming(sk, skb, th, 1); 5697 if (res <= 0) 5698 return -res; 5699 5700 step5: 5701 if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5702 goto discard; 5703 5704 tcp_rcv_rtt_measure_ts(sk, skb); 5705 5706 /* Process urgent data. */ 5707 tcp_urg(sk, skb, th); 5708 5709 /* step 7: process the segment text */ 5710 tcp_data_queue(sk, skb); 5711 5712 tcp_data_snd_check(sk); 5713 tcp_ack_snd_check(sk); 5714 return 0; 5715 5716 csum_error: 5717 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5718 5719 discard: 5720 __kfree_skb(skb); 5721 return 0; 5722 } 5723 EXPORT_SYMBOL(tcp_rcv_established); 5724 5725 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) 5726 { 5727 struct tcp_sock *tp = tcp_sk(sk); 5728 struct inet_connection_sock *icsk = inet_csk(sk); 5729 5730 tcp_set_state(sk, TCP_ESTABLISHED); 5731 5732 if (skb != NULL) 5733 security_inet_conn_established(sk, skb); 5734 5735 /* Make sure socket is routed, for correct metrics. */ 5736 icsk->icsk_af_ops->rebuild_header(sk); 5737 5738 tcp_init_metrics(sk); 5739 5740 tcp_init_congestion_control(sk); 5741 5742 /* Prevent spurious tcp_cwnd_restart() on first data 5743 * packet. 5744 */ 5745 tp->lsndtime = tcp_time_stamp; 5746 5747 tcp_init_buffer_space(sk); 5748 5749 if (sock_flag(sk, SOCK_KEEPOPEN)) 5750 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 5751 5752 if (!tp->rx_opt.snd_wscale) 5753 __tcp_fast_path_on(tp, tp->snd_wnd); 5754 else 5755 tp->pred_flags = 0; 5756 5757 if (!sock_flag(sk, SOCK_DEAD)) { 5758 sk->sk_state_change(sk); 5759 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 5760 } 5761 } 5762 5763 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5764 const struct tcphdr *th, unsigned int len) 5765 { 5766 const u8 *hash_location; 5767 struct inet_connection_sock *icsk = inet_csk(sk); 5768 struct tcp_sock *tp = tcp_sk(sk); 5769 struct tcp_cookie_values *cvp = tp->cookie_values; 5770 int saved_clamp = tp->rx_opt.mss_clamp; 5771 5772 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); 5773 5774 if (th->ack) { 5775 /* rfc793: 5776 * "If the state is SYN-SENT then 5777 * first check the ACK bit 5778 * If the ACK bit is set 5779 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 5780 * a reset (unless the RST bit is set, if so drop 5781 * the segment and return)" 5782 * 5783 * We do not send data with SYN, so that RFC-correct 5784 * test reduces to: 5785 */ 5786 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) 5787 goto reset_and_undo; 5788 5789 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 5790 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 5791 tcp_time_stamp)) { 5792 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); 5793 goto reset_and_undo; 5794 } 5795 5796 /* Now ACK is acceptable. 
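 * (It acknowledges exactly our SYN, i.e. ack_seq == snd_nxt, and when
 * timestamps are in use it echoes a value we really sent since the
 * original SYN.)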
5797 * 5798 * "If the RST bit is set 5799 * If the ACK was acceptable then signal the user "error: 5800 * connection reset", drop the segment, enter CLOSED state, 5801 * delete TCB, and return." 5802 */ 5803 5804 if (th->rst) { 5805 tcp_reset(sk); 5806 goto discard; 5807 } 5808 5809 /* rfc793: 5810 * "fifth, if neither of the SYN or RST bits is set then 5811 * drop the segment and return." 5812 * 5813 * See note below! 5814 * --ANK(990513) 5815 */ 5816 if (!th->syn) 5817 goto discard_and_undo; 5818 5819 /* rfc793: 5820 * "If the SYN bit is on ... 5821 * are acceptable then ... 5822 * (our SYN has been ACKed), change the connection 5823 * state to ESTABLISHED..." 5824 */ 5825 5826 TCP_ECN_rcv_synack(tp, th); 5827 5828 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5829 tcp_ack(sk, skb, FLAG_SLOWPATH); 5830 5831 /* Ok.. it's good. Set up sequence numbers and 5832 * move to established. 5833 */ 5834 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5835 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5836 5837 /* RFC1323: The window in SYN & SYN/ACK segments is 5838 * never scaled. 5839 */ 5840 tp->snd_wnd = ntohs(th->window); 5841 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 5842 5843 if (!tp->rx_opt.wscale_ok) { 5844 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 5845 tp->window_clamp = min(tp->window_clamp, 65535U); 5846 } 5847 5848 if (tp->rx_opt.saw_tstamp) { 5849 tp->rx_opt.tstamp_ok = 1; 5850 tp->tcp_header_len = 5851 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5852 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5853 tcp_store_ts_recent(tp); 5854 } else { 5855 tp->tcp_header_len = sizeof(struct tcphdr); 5856 } 5857 5858 if (tcp_is_sack(tp) && sysctl_tcp_fack) 5859 tcp_enable_fack(tp); 5860 5861 tcp_mtup_init(sk); 5862 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5863 tcp_initialize_rcv_mss(sk); 5864 5865 /* Remember, tcp_poll() does not lock socket! 5866 * Change state from SYN-SENT only after copied_seq 5867 * is initialized. */ 5868 tp->copied_seq = tp->rcv_nxt; 5869 5870 if (cvp != NULL && 5871 cvp->cookie_pair_size > 0 && 5872 tp->rx_opt.cookie_plus > 0) { 5873 int cookie_size = tp->rx_opt.cookie_plus 5874 - TCPOLEN_COOKIE_BASE; 5875 int cookie_pair_size = cookie_size 5876 + cvp->cookie_desired; 5877 5878 /* A cookie extension option was sent and returned. 5879 * Note that each incoming SYNACK replaces the 5880 * Responder cookie. The initial exchange is most 5881 * fragile, as protection against spoofing relies 5882 * entirely upon the sequence and timestamp (above). 5883 * This replacement strategy allows the correct pair to 5884 * pass through, while any others will be filtered via 5885 * Responder verification later. 5886 */ 5887 if (sizeof(cvp->cookie_pair) >= cookie_pair_size) { 5888 memcpy(&cvp->cookie_pair[cvp->cookie_desired], 5889 hash_location, cookie_size); 5890 cvp->cookie_pair_size = cookie_pair_size; 5891 } 5892 } 5893 5894 smp_mb(); 5895 5896 tcp_finish_connect(sk, skb); 5897 5898 if (sk->sk_write_pending || 5899 icsk->icsk_accept_queue.rskq_defer_accept || 5900 icsk->icsk_ack.pingpong) { 5901 /* Save one ACK. Data will be ready after 5902 * several ticks, if write_pending is set. 
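 * Instead of acking the SYN-ACK immediately, a delayed-ACK timer is
 * armed below; if the pending write is transmitted first, the ACK
 * simply rides on that data segment and a pure ACK is saved.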
5903 * 5904 * It may be deleted, but with this feature tcpdumps 5905 * look so _wonderfully_ clever, that I was not able 5906 * to stand against the temptation 8) --ANK 5907 */ 5908 inet_csk_schedule_ack(sk); 5909 icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5910 tcp_enter_quickack_mode(sk); 5911 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5912 TCP_DELACK_MAX, TCP_RTO_MAX); 5913 5914 discard: 5915 __kfree_skb(skb); 5916 return 0; 5917 } else { 5918 tcp_send_ack(sk); 5919 } 5920 return -1; 5921 } 5922 5923 /* No ACK in the segment */ 5924 5925 if (th->rst) { 5926 /* rfc793: 5927 * "If the RST bit is set 5928 * 5929 * Otherwise (no ACK) drop the segment and return." 5930 */ 5931 5932 goto discard_and_undo; 5933 } 5934 5935 /* PAWS check. */ 5936 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 5937 tcp_paws_reject(&tp->rx_opt, 0)) 5938 goto discard_and_undo; 5939 5940 if (th->syn) { 5941 /* We see SYN without ACK. It is attempt of 5942 * simultaneous connect with crossed SYNs. 5943 * Particularly, it can be connect to self. 5944 */ 5945 tcp_set_state(sk, TCP_SYN_RECV); 5946 5947 if (tp->rx_opt.saw_tstamp) { 5948 tp->rx_opt.tstamp_ok = 1; 5949 tcp_store_ts_recent(tp); 5950 tp->tcp_header_len = 5951 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5952 } else { 5953 tp->tcp_header_len = sizeof(struct tcphdr); 5954 } 5955 5956 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5957 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5958 5959 /* RFC1323: The window in SYN & SYN/ACK segments is 5960 * never scaled. 5961 */ 5962 tp->snd_wnd = ntohs(th->window); 5963 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5964 tp->max_window = tp->snd_wnd; 5965 5966 TCP_ECN_rcv_syn(tp, th); 5967 5968 tcp_mtup_init(sk); 5969 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5970 tcp_initialize_rcv_mss(sk); 5971 5972 tcp_send_synack(sk); 5973 #if 0 5974 /* Note, we could accept data and URG from this segment. 5975 * There are no obstacles to make this. 5976 * 5977 * However, if we ignore data in ACKless segments sometimes, 5978 * we have no reasons to accept it sometimes. 5979 * Also, seems the code doing it in step6 of tcp_rcv_state_process 5980 * is not flawless. So, discard packet for sanity. 5981 * Uncomment this return to process the data. 5982 */ 5983 return -1; 5984 #else 5985 goto discard; 5986 #endif 5987 } 5988 /* "fifth, if neither of the SYN or RST bits is set then 5989 * drop the segment and return." 5990 */ 5991 5992 discard_and_undo: 5993 tcp_clear_options(&tp->rx_opt); 5994 tp->rx_opt.mss_clamp = saved_clamp; 5995 goto discard; 5996 5997 reset_and_undo: 5998 tcp_clear_options(&tp->rx_opt); 5999 tp->rx_opt.mss_clamp = saved_clamp; 6000 return 1; 6001 } 6002 6003 /* 6004 * This function implements the receiving procedure of RFC 793 for 6005 * all states except ESTABLISHED and TIME_WAIT. 6006 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 6007 * address independent. 
6008 */ 6009 6010 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 6011 const struct tcphdr *th, unsigned int len) 6012 { 6013 struct tcp_sock *tp = tcp_sk(sk); 6014 struct inet_connection_sock *icsk = inet_csk(sk); 6015 int queued = 0; 6016 int res; 6017 6018 tp->rx_opt.saw_tstamp = 0; 6019 6020 switch (sk->sk_state) { 6021 case TCP_CLOSE: 6022 goto discard; 6023 6024 case TCP_LISTEN: 6025 if (th->ack) 6026 return 1; 6027 6028 if (th->rst) 6029 goto discard; 6030 6031 if (th->syn) { 6032 if (th->fin) 6033 goto discard; 6034 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 6035 return 1; 6036 6037 /* Now we have several options: In theory there is 6038 * nothing else in the frame. KA9Q has an option to 6039 * send data with the syn, BSD accepts data with the 6040 * syn up to the [to be] advertised window and 6041 * Solaris 2.1 gives you a protocol error. For now 6042 * we just ignore it, that fits the spec precisely 6043 * and avoids incompatibilities. It would be nice in 6044 * future to drop through and process the data. 6045 * 6046 * Now that TTCP is starting to be used we ought to 6047 * queue this data. 6048 * But, this leaves one open to an easy denial of 6049 * service attack, and SYN cookies can't defend 6050 * against this problem. So, we drop the data 6051 * in the interest of security over speed unless 6052 * it's still in use. 6053 */ 6054 kfree_skb(skb); 6055 return 0; 6056 } 6057 goto discard; 6058 6059 case TCP_SYN_SENT: 6060 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); 6061 if (queued >= 0) 6062 return queued; 6063 6064 /* Do step6 onward by hand. */ 6065 tcp_urg(sk, skb, th); 6066 __kfree_skb(skb); 6067 tcp_data_snd_check(sk); 6068 return 0; 6069 } 6070 6071 res = tcp_validate_incoming(sk, skb, th, 0); 6072 if (res <= 0) 6073 return -res; 6074 6075 /* step 5: check the ACK field */ 6076 if (th->ack) { 6077 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; 6078 6079 switch (sk->sk_state) { 6080 case TCP_SYN_RECV: 6081 if (acceptable) { 6082 tp->copied_seq = tp->rcv_nxt; 6083 smp_mb(); 6084 tcp_set_state(sk, TCP_ESTABLISHED); 6085 sk->sk_state_change(sk); 6086 6087 /* Note, that this wakeup is only for marginal 6088 * crossed SYN case. Passively open sockets 6089 * are not waked up, because sk->sk_sleep == 6090 * NULL and sk->sk_socket == NULL. 6091 */ 6092 if (sk->sk_socket) 6093 sk_wake_async(sk, 6094 SOCK_WAKE_IO, POLL_OUT); 6095 6096 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 6097 tp->snd_wnd = ntohs(th->window) << 6098 tp->rx_opt.snd_wscale; 6099 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 6100 6101 if (tp->rx_opt.tstamp_ok) 6102 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 6103 6104 /* Make sure socket is routed, for 6105 * correct metrics. 6106 */ 6107 icsk->icsk_af_ops->rebuild_header(sk); 6108 6109 tcp_init_metrics(sk); 6110 6111 tcp_init_congestion_control(sk); 6112 6113 /* Prevent spurious tcp_cwnd_restart() on 6114 * first data packet. 
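 * tcp_cwnd_restart() decays the congestion window once the sender
 * has been idle for longer than the RTO; stamping lsndtime here
 * keeps the connection-setup time from being counted as idle time.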
6115 */ 6116 tp->lsndtime = tcp_time_stamp; 6117 6118 tcp_mtup_init(sk); 6119 tcp_initialize_rcv_mss(sk); 6120 tcp_init_buffer_space(sk); 6121 tcp_fast_path_on(tp); 6122 } else { 6123 return 1; 6124 } 6125 break; 6126 6127 case TCP_FIN_WAIT1: 6128 if (tp->snd_una == tp->write_seq) { 6129 tcp_set_state(sk, TCP_FIN_WAIT2); 6130 sk->sk_shutdown |= SEND_SHUTDOWN; 6131 dst_confirm(__sk_dst_get(sk)); 6132 6133 if (!sock_flag(sk, SOCK_DEAD)) 6134 /* Wake up lingering close() */ 6135 sk->sk_state_change(sk); 6136 else { 6137 int tmo; 6138 6139 if (tp->linger2 < 0 || 6140 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 6141 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 6142 tcp_done(sk); 6143 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6144 return 1; 6145 } 6146 6147 tmo = tcp_fin_time(sk); 6148 if (tmo > TCP_TIMEWAIT_LEN) { 6149 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 6150 } else if (th->fin || sock_owned_by_user(sk)) { 6151 /* Bad case. We could lose such FIN otherwise. 6152 * It is not a big problem, but it looks confusing 6153 * and not so rare event. We still can lose it now, 6154 * if it spins in bh_lock_sock(), but it is really 6155 * marginal case. 6156 */ 6157 inet_csk_reset_keepalive_timer(sk, tmo); 6158 } else { 6159 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 6160 goto discard; 6161 } 6162 } 6163 } 6164 break; 6165 6166 case TCP_CLOSING: 6167 if (tp->snd_una == tp->write_seq) { 6168 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 6169 goto discard; 6170 } 6171 break; 6172 6173 case TCP_LAST_ACK: 6174 if (tp->snd_una == tp->write_seq) { 6175 tcp_update_metrics(sk); 6176 tcp_done(sk); 6177 goto discard; 6178 } 6179 break; 6180 } 6181 } else 6182 goto discard; 6183 6184 /* step 6: check the URG bit */ 6185 tcp_urg(sk, skb, th); 6186 6187 /* step 7: process the segment text */ 6188 switch (sk->sk_state) { 6189 case TCP_CLOSE_WAIT: 6190 case TCP_CLOSING: 6191 case TCP_LAST_ACK: 6192 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 6193 break; 6194 case TCP_FIN_WAIT1: 6195 case TCP_FIN_WAIT2: 6196 /* RFC 793 says to queue data in these states, 6197 * RFC 1122 says we MUST send a reset. 6198 * BSD 4.4 also does reset. 6199 */ 6200 if (sk->sk_shutdown & RCV_SHUTDOWN) { 6201 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 6202 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 6203 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6204 tcp_reset(sk); 6205 return 1; 6206 } 6207 } 6208 /* Fall through */ 6209 case TCP_ESTABLISHED: 6210 tcp_data_queue(sk, skb); 6211 queued = 1; 6212 break; 6213 } 6214 6215 /* tcp_data could move socket to TIME-WAIT */ 6216 if (sk->sk_state != TCP_CLOSE) { 6217 tcp_data_snd_check(sk); 6218 tcp_ack_snd_check(sk); 6219 } 6220 6221 if (!queued) { 6222 discard: 6223 __kfree_skb(skb); 6224 } 6225 return 0; 6226 } 6227 EXPORT_SYMBOL(tcp_rcv_state_process); 6228