1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 */ 20 21 /* 22 * Changes: 23 * Pedro Roque : Fast Retransmit/Recovery. 24 * Two receive queues. 25 * Retransmit queue handled by TCP. 26 * Better retransmit timer handling. 27 * New congestion avoidance. 28 * Header prediction. 29 * Variable renaming. 30 * 31 * Eric : Fast Retransmit. 32 * Randy Scott : MSS option defines. 33 * Eric Schenk : Fixes to slow start algorithm. 34 * Eric Schenk : Yet another double ACK bug. 35 * Eric Schenk : Delayed ACK bug fixes. 36 * Eric Schenk : Floyd style fast retrans war avoidance. 37 * David S. Miller : Don't allow zero congestion window. 38 * Eric Schenk : Fix retransmitter so that it sends 39 * next packet on ack of previous packet. 40 * Andi Kleen : Moved open_request checking here 41 * and process RSTs for open_requests. 42 * Andi Kleen : Better prune_queue, and other fixes. 43 * Andrey Savochkin: Fix RTT measurements in the presence of 44 * timestamps. 45 * Andrey Savochkin: Check sequence numbers correctly when 46 * removing SACKs due to in sequence incoming 47 * data segments. 48 * Andi Kleen: Make sure we never ack data there is not 49 * enough room for. Also make this condition 50 * a fatal error if it might still happen. 51 * Andi Kleen: Add tcp_measure_rcv_mss to make 52 * connections with MSS<min(MTU,ann. MSS) 53 * work without delayed acks. 54 * Andi Kleen: Process packets with PSH set in the 55 * fast path. 56 * J Hadi Salim: ECN support 57 * Andrei Gurtov, 58 * Pasi Sarolahti, 59 * Panu Kuhlberg: Experimental audit of TCP (re)transmission 60 * engine. Lots of bugs are found. 
61 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs 62 */ 63 64 #define pr_fmt(fmt) "TCP: " fmt 65 66 #include <linux/mm.h> 67 #include <linux/slab.h> 68 #include <linux/module.h> 69 #include <linux/sysctl.h> 70 #include <linux/kernel.h> 71 #include <net/dst.h> 72 #include <net/tcp.h> 73 #include <net/inet_common.h> 74 #include <linux/ipsec.h> 75 #include <asm/unaligned.h> 76 #include <net/netdma.h> 77 78 int sysctl_tcp_timestamps __read_mostly = 1; 79 int sysctl_tcp_window_scaling __read_mostly = 1; 80 int sysctl_tcp_sack __read_mostly = 1; 81 int sysctl_tcp_fack __read_mostly = 1; 82 int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 83 EXPORT_SYMBOL(sysctl_tcp_reordering); 84 int sysctl_tcp_dsack __read_mostly = 1; 85 int sysctl_tcp_app_win __read_mostly = 31; 86 int sysctl_tcp_adv_win_scale __read_mostly = 1; 87 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 88 89 /* rfc5961 challenge ack rate limiting */ 90 int sysctl_tcp_challenge_ack_limit = 100; 91 92 int sysctl_tcp_stdurg __read_mostly; 93 int sysctl_tcp_rfc1337 __read_mostly; 94 int sysctl_tcp_max_orphans __read_mostly = NR_FILE; 95 int sysctl_tcp_frto __read_mostly = 2; 96 97 int sysctl_tcp_thin_dupack __read_mostly; 98 99 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100 int sysctl_tcp_early_retrans __read_mostly = 3; 101 102 #define FLAG_DATA 0x01 /* Incoming frame contained data. */ 103 #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 104 #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ 105 #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ 106 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ 107 #define FLAG_DATA_SACKED 0x20 /* New SACK. */ 108 #define FLAG_ECE 0x40 /* ECE in this ACK */ 109 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ 110 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ 111 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 112 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 113 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 114 115 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 116 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 117 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) 118 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) 119 120 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) 121 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) 122 123 /* Adapt the MSS value used to make delayed ack decision to the 124 * real world. 125 */ 126 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) 127 { 128 struct inet_connection_sock *icsk = inet_csk(sk); 129 const unsigned int lss = icsk->icsk_ack.last_seg_size; 130 unsigned int len; 131 132 icsk->icsk_ack.last_seg_size = 0; 133 134 /* skb->len may jitter because of SACKs, even if peer 135 * sends good full-sized frames. 136 */ 137 len = skb_shinfo(skb)->gso_size ? : skb->len; 138 if (len >= icsk->icsk_ack.rcv_mss) { 139 icsk->icsk_ack.rcv_mss = len; 140 } else { 141 /* Otherwise, we make more careful check taking into account, 142 * that SACKs block is variable. 143 * 144 * "len" is invariant segment length, including TCP header. 
145 */ 146 len += skb->data - skb_transport_header(skb); 147 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || 148 /* If PSH is not set, packet should be 149 * full sized, provided peer TCP is not badly broken. 150 * This observation (if it is correct 8)) allows 151 * to handle super-low mtu links fairly. 152 */ 153 (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && 154 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { 155 /* Subtract also invariant (if peer is RFC compliant), 156 * tcp header plus fixed timestamp option length. 157 * Resulting "len" is MSS free of SACK jitter. 158 */ 159 len -= tcp_sk(sk)->tcp_header_len; 160 icsk->icsk_ack.last_seg_size = len; 161 if (len == lss) { 162 icsk->icsk_ack.rcv_mss = len; 163 return; 164 } 165 } 166 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) 167 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; 168 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 169 } 170 } 171 172 static void tcp_incr_quickack(struct sock *sk) 173 { 174 struct inet_connection_sock *icsk = inet_csk(sk); 175 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 176 177 if (quickacks == 0) 178 quickacks = 2; 179 if (quickacks > icsk->icsk_ack.quick) 180 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 181 } 182 183 static void tcp_enter_quickack_mode(struct sock *sk) 184 { 185 struct inet_connection_sock *icsk = inet_csk(sk); 186 tcp_incr_quickack(sk); 187 icsk->icsk_ack.pingpong = 0; 188 icsk->icsk_ack.ato = TCP_ATO_MIN; 189 } 190 191 /* Send ACKs quickly, if "quick" count is not exhausted 192 * and the session is not interactive. 193 */ 194 195 static inline bool tcp_in_quickack_mode(const struct sock *sk) 196 { 197 const struct inet_connection_sock *icsk = inet_csk(sk); 198 199 return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; 200 } 201 202 static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) 203 { 204 if (tp->ecn_flags & TCP_ECN_OK) 205 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; 206 } 207 208 static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 209 { 210 if (tcp_hdr(skb)->cwr) 211 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 212 } 213 214 static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) 215 { 216 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 217 } 218 219 static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) 220 { 221 if (!(tp->ecn_flags & TCP_ECN_OK)) 222 return; 223 224 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { 225 case INET_ECN_NOT_ECT: 226 /* Funny extension: if ECT is not set on a segment, 227 * and we already seen ECT on a previous segment, 228 * it is probably a retransmit. 
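		 *
		 * (For reference when reading this switch: the two ECN bits
		 * in the IP header are INET_ECN_NOT_ECT (00), INET_ECN_ECT_1
		 * (01), INET_ECN_ECT_0 (10) and INET_ECN_CE (11);
		 * INET_ECN_MASK selects just those two bits.  Acking quickly
		 * here mirrors the CE case below: if this really is a
		 * retransmit, the sender is probably recovering with a small
		 * cwnd and benefits from prompt ACKs.)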
 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when the connection enters the established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

	sndmem *= TCP_INIT_CWND;
	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning the advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receive window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but the
 * lower the throughput and the higher the sensitivity of the connection
 * to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used during the "slow start"
 * phase to predict the further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at the sender, even when the application
 *   requires a significant "application buffer". This is check #1.
 * - to prevent pruning of the receive queue because of misprediction
 *   of the receiver window. This is check #2.
 *
 * The scheme does not work when the sender sends good segments opening
 * the window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check #2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !sk_under_memory_pressure(sk)) {
		int incr;

		/* Check #2.
		 * Increase the window if an skb with this much overhead
		 * will still fit into rcvbuf in the future.
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when the connection enters the established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
	int rcvmem;

	/* Limit to 10 segments if mss <= 1460,
	 * or 14600/mss segments, with a minimum of two segments.
	 */
	if (mss > 1460)
		icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);

	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
	while (tcp_win_from_space(rcvmem) < mss)
		rcvmem += 128;

	rcvmem *= icwnd;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fix up everything. This is done immediately after the
 * connection enters the established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate the window clamp after the socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize the RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about it.
 * It's better to underestimate RCV_MSS than to overestimate it.
 * Overestimating makes us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
433 */ 434 void tcp_initialize_rcv_mss(struct sock *sk) 435 { 436 const struct tcp_sock *tp = tcp_sk(sk); 437 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 438 439 hint = min(hint, tp->rcv_wnd / 2); 440 hint = min(hint, TCP_MSS_DEFAULT); 441 hint = max(hint, TCP_MIN_MSS); 442 443 inet_csk(sk)->icsk_ack.rcv_mss = hint; 444 } 445 EXPORT_SYMBOL(tcp_initialize_rcv_mss); 446 447 /* Receiver "autotuning" code. 448 * 449 * The algorithm for RTT estimation w/o timestamps is based on 450 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. 451 * <http://public.lanl.gov/radiant/pubs.html#DRS> 452 * 453 * More detail on this code can be found at 454 * <http://staff.psc.edu/jheffner/>, 455 * though this reference is out of date. A new paper 456 * is pending. 457 */ 458 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) 459 { 460 u32 new_sample = tp->rcv_rtt_est.rtt; 461 long m = sample; 462 463 if (m == 0) 464 m = 1; 465 466 if (new_sample != 0) { 467 /* If we sample in larger samples in the non-timestamp 468 * case, we could grossly overestimate the RTT especially 469 * with chatty applications or bulk transfer apps which 470 * are stalled on filesystem I/O. 471 * 472 * Also, since we are only going for a minimum in the 473 * non-timestamp case, we do not smooth things out 474 * else with timestamps disabled convergence takes too 475 * long. 476 */ 477 if (!win_dep) { 478 m -= (new_sample >> 3); 479 new_sample += m; 480 } else { 481 m <<= 3; 482 if (m < new_sample) 483 new_sample = m; 484 } 485 } else { 486 /* No previous measure. */ 487 new_sample = m << 3; 488 } 489 490 if (tp->rcv_rtt_est.rtt != new_sample) 491 tp->rcv_rtt_est.rtt = new_sample; 492 } 493 494 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) 495 { 496 if (tp->rcv_rtt_est.time == 0) 497 goto new_measure; 498 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 499 return; 500 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); 501 502 new_measure: 503 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 504 tp->rcv_rtt_est.time = tcp_time_stamp; 505 } 506 507 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, 508 const struct sk_buff *skb) 509 { 510 struct tcp_sock *tp = tcp_sk(sk); 511 if (tp->rx_opt.rcv_tsecr && 512 (TCP_SKB_CB(skb)->end_seq - 513 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) 514 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); 515 } 516 517 /* 518 * This function should be called every time data is copied to user space. 519 * It calculates the appropriate TCP receive buffer space. 520 */ 521 void tcp_rcv_space_adjust(struct sock *sk) 522 { 523 struct tcp_sock *tp = tcp_sk(sk); 524 int time; 525 int space; 526 527 if (tp->rcvq_space.time == 0) 528 goto new_measure; 529 530 time = tcp_time_stamp - tp->rcvq_space.time; 531 if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) 532 return; 533 534 space = 2 * (tp->copied_seq - tp->rcvq_space.seq); 535 536 space = max(tp->rcvq_space.space, space); 537 538 if (tp->rcvq_space.space != space) { 539 int rcvmem; 540 541 tp->rcvq_space.space = space; 542 543 if (sysctl_tcp_moderate_rcvbuf && 544 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 545 int new_clamp = space; 546 547 /* Receive space grows, normalize in order to 548 * take into account packet headers and sk_buff 549 * structure overhead. 
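			 *
			 * Rough illustration (example numbers only): if the
			 * application copied ten 1460-byte segments during the
			 * last RTT, the code above set space = 2 * 14600 =
			 * 29200 bytes; below this becomes 29200 / advmss = 20
			 * segments, each charged at the true memory cost of
			 * one advmss-sized skb (rcvmem), before the result is
			 * clamped to sysctl_tcp_rmem[2].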
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along. */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCPs do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACKs the
 * sender will sit on its end and only queue most of its data, because
 * it can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, it increments snd_cwnd and transmits more of its
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * the delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long a gap. Apparently the sender failed to
			 * restart the window, so we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88.  Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible;
	 * m stands for "measurement".
	 *
	 * In a 1990 paper the rto value is changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase it
	 * too slowly when it should be increased quickly, decrease it too
	 * quickly, etc.  I guess in BSD RTO effectively takes ONE value, so it
	 * does not matter at all how it is _calculated_.  It seems to be a
	 * trap that VJ failed to avoid. 8)
	 */
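	/* Scaling note (summary of the fixed-point arithmetic below): tp->srtt
	 * is kept as 8 * SRTT ("srtt = m << 3" for the first sample) and
	 * tp->mdev / tp->rttvar as 4 * mdev ("mdev = m << 1" so that the
	 * initial RTO is 3 * rtt, as noted below).  An RTO of the form
	 * srtt/8 + rttvar therefore corresponds to SRTT + 4 * mdev, i.e. the
	 * 1990 formula above.  For example, a first sample of m ticks gives
	 * srtt = 8 * m and mdev = 2 * m, hence RTO = m + 2 * m = 3 * m,
	 * subject to the tcp_rto_min() floor applied to rttvar.
	 */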
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);	/* similar update on mdev */
			/* This is similar to one of the Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but it also limits too fast rto decreases,
			 * which happen in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);	/* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}

/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 * routine referred to above.
 */
void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If the rtt variance happened to be less than 50 msec, it is a
	 *    hallucination.  It cannot be less, due to the utterly erratic
	 *    ACK generation of at least Solaris and FreeBSD.  "Erratic ACKs"
	 *    have _nothing_ to do with delayed acks, because at cwnd > 2 the
	 *    true delack timeout is invisible.  Actually, Linux 2.4 also
	 *    generates erratic ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    the whole algorithm is pure shit and should be replaced
	 *    with a correct one.  That is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, the current algorithm
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ?
dst_metric(dst, RTAX_INITCWND) : 0); 734 735 if (!cwnd) 736 cwnd = TCP_INIT_CWND; 737 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 738 } 739 740 /* 741 * Packet counting of FACK is based on in-order assumptions, therefore TCP 742 * disables it when reordering is detected 743 */ 744 void tcp_disable_fack(struct tcp_sock *tp) 745 { 746 /* RFC3517 uses different metric in lost marker => reset on change */ 747 if (tcp_is_fack(tp)) 748 tp->lost_skb_hint = NULL; 749 tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED; 750 } 751 752 /* Take a notice that peer is sending D-SACKs */ 753 static void tcp_dsack_seen(struct tcp_sock *tp) 754 { 755 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; 756 } 757 758 static void tcp_update_reordering(struct sock *sk, const int metric, 759 const int ts) 760 { 761 struct tcp_sock *tp = tcp_sk(sk); 762 if (metric > tp->reordering) { 763 int mib_idx; 764 765 tp->reordering = min(TCP_MAX_REORDERING, metric); 766 767 /* This exciting event is worth to be remembered. 8) */ 768 if (ts) 769 mib_idx = LINUX_MIB_TCPTSREORDER; 770 else if (tcp_is_reno(tp)) 771 mib_idx = LINUX_MIB_TCPRENOREORDER; 772 else if (tcp_is_fack(tp)) 773 mib_idx = LINUX_MIB_TCPFACKREORDER; 774 else 775 mib_idx = LINUX_MIB_TCPSACKREORDER; 776 777 NET_INC_STATS_BH(sock_net(sk), mib_idx); 778 #if FASTRETRANS_DEBUG > 1 779 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", 780 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 781 tp->reordering, 782 tp->fackets_out, 783 tp->sacked_out, 784 tp->undo_marker ? tp->undo_retrans : 0); 785 #endif 786 tcp_disable_fack(tp); 787 } 788 789 if (metric > 0) 790 tcp_disable_early_retrans(tp); 791 } 792 793 /* This must be called before lost_out is incremented */ 794 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) 795 { 796 if ((tp->retransmit_skb_hint == NULL) || 797 before(TCP_SKB_CB(skb)->seq, 798 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 799 tp->retransmit_skb_hint = skb; 800 801 if (!tp->lost_out || 802 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) 803 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 804 } 805 806 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) 807 { 808 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 809 tcp_verify_retransmit_hint(tp, skb); 810 811 tp->lost_out += tcp_skb_pcount(skb); 812 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 813 } 814 } 815 816 static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, 817 struct sk_buff *skb) 818 { 819 tcp_verify_retransmit_hint(tp, skb); 820 821 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 822 tp->lost_out += tcp_skb_pcount(skb); 823 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 824 } 825 } 826 827 /* This procedure tags the retransmission queue when SACKs arrive. 828 * 829 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 830 * Packets in queue with these bits set are counted in variables 831 * sacked_out, retrans_out and lost_out, correspondingly. 832 * 833 * Valid combinations are: 834 * Tag InFlight Description 835 * 0 1 - orig segment is in flight. 836 * S 0 - nothing flies, orig reached receiver. 837 * L 0 - nothing flies, orig lost by net. 838 * R 2 - both orig and retransmit are in flight. 839 * L|R 1 - orig is lost, retransmit is in flight. 840 * S|R 1 - orig reached receiver, retrans is still in flight. 841 * (L|S|R is logically valid, it could occur when L|R is sacked, 842 * but it is equivalent to plain S and code short-curcuits it to S. 
 * L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added a new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by the order of our actions
 * when multiple events arrive simultaneously. (see the function below)
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance by which a packet can be
 * displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills an old hole and the corresponding segment was never
 *    retransmitted -> reordering. Alas, we cannot use it
 *    when the segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. A D-SACK arrives
 *    for a retransmitted and already SACKed segment -> reordering.
 * Neither of these heuristics is used in the Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits
 * within the expected sequence limits, i.e., it is between SND.UNA and
 * SND.NXT. Note that SND.UNA is not included in the range even though it is
 * valid, because it would mean that the receiver is rather inconsistent with
 * itself, reporting SACK reneging when it should advance SND.UNA. Such a
 * SACK block is nevertheless perfectly valid in light of RFC2018, which
 * explicitly states that "SACK block MUST reflect the newest segment. Even
 * if the newest segment is going to be discarded ...", though it does not
 * look very clever in the case of the head skb. Due to potential receiver
 * driven attacks, we choose to avoid immediate execution of a walk in the
 * write queue due to reneging and defer the head skb's loss recovery to the
 * standard loss recovery procedure that will eventually trigger (nothing
 * forbids us doing this).
 *
 * This also implements blockage of start_seq wrap-around. The problem lies
 * in the fact that though start_seq (s) is before end_seq (i.e., not
 * reversed), there's no guarantee that it will be before snd_nxt (n). The
 * problem happens when start_seq resides between end_seq wrap (e_w) and
 * snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable, but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes a somewhat
 * similar case (end_seq after snd_nxt wrap), as the earlier reversed check
 * in the snd_nxt wrap -> snd_una region will then become "well defined",
 * i.e., equal to the ideal case (infinite seqno space without wrap-caused
 * issues).
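 *
 * A worked example of the checks in tcp_is_sackblock_valid() below
 * (illustrative numbers only): with snd_una = 1000, snd_nxt = 5000 and
 * undo_marker = 400, a SACK block [2000,3000) lies inside the outstanding
 * window and is accepted, a block [6000,7000) ends beyond snd_nxt and is
 * discarded, and a D-SACK [500,900) that sits entirely below snd_una but
 * above undo_marker is still accepted.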
907 * 908 * With D-SACK the lower bound is extended to cover sequence space below 909 * SND.UNA down to undo_marker, which is the last point of interest. Yet 910 * again, D-SACK block must not to go across snd_una (for the same reason as 911 * for the normal SACK blocks, explained above). But there all simplicity 912 * ends, TCP might receive valid D-SACKs below that. As long as they reside 913 * fully below undo_marker they do not affect behavior in anyway and can 914 * therefore be safely ignored. In rare cases (which are more or less 915 * theoretical ones), the D-SACK will nicely cross that boundary due to skb 916 * fragmentation and packet reordering past skb's retransmission. To consider 917 * them correctly, the acceptable range must be extended even more though 918 * the exact amount is rather hard to quantify. However, tp->max_window can 919 * be used as an exaggerated estimate. 920 */ 921 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, 922 u32 start_seq, u32 end_seq) 923 { 924 /* Too far in future, or reversed (interpretation is ambiguous) */ 925 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) 926 return false; 927 928 /* Nasty start_seq wrap-around check (see comments above) */ 929 if (!before(start_seq, tp->snd_nxt)) 930 return false; 931 932 /* In outstanding window? ...This is valid exit for D-SACKs too. 933 * start_seq == snd_una is non-sensical (see comments above) 934 */ 935 if (after(start_seq, tp->snd_una)) 936 return true; 937 938 if (!is_dsack || !tp->undo_marker) 939 return false; 940 941 /* ...Then it's D-SACK, and must reside below snd_una completely */ 942 if (after(end_seq, tp->snd_una)) 943 return false; 944 945 if (!before(start_seq, tp->undo_marker)) 946 return true; 947 948 /* Too old */ 949 if (!after(end_seq, tp->undo_marker)) 950 return false; 951 952 /* Undo_marker boundary crossing (overestimates a lot). Known already: 953 * start_seq < undo_marker and end_seq >= undo_marker. 954 */ 955 return !before(start_seq, end_seq - tp->max_window); 956 } 957 958 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". 959 * Event "B". Later note: FACK people cheated me again 8), we have to account 960 * for reordering! Ugly, but should help. 961 * 962 * Search retransmitted skbs from write_queue that were sent when snd_nxt was 963 * less than what is now known to be received by the other end (derived from 964 * highest SACK block). Also calculate the lowest snd_nxt among the remaining 965 * retransmitted skbs to avoid some costly processing per ACKs. 
966 */ 967 static void tcp_mark_lost_retrans(struct sock *sk) 968 { 969 const struct inet_connection_sock *icsk = inet_csk(sk); 970 struct tcp_sock *tp = tcp_sk(sk); 971 struct sk_buff *skb; 972 int cnt = 0; 973 u32 new_low_seq = tp->snd_nxt; 974 u32 received_upto = tcp_highest_sack_seq(tp); 975 976 if (!tcp_is_fack(tp) || !tp->retrans_out || 977 !after(received_upto, tp->lost_retrans_low) || 978 icsk->icsk_ca_state != TCP_CA_Recovery) 979 return; 980 981 tcp_for_write_queue(skb, sk) { 982 u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; 983 984 if (skb == tcp_send_head(sk)) 985 break; 986 if (cnt == tp->retrans_out) 987 break; 988 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 989 continue; 990 991 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) 992 continue; 993 994 /* TODO: We would like to get rid of tcp_is_fack(tp) only 995 * constraint here (see above) but figuring out that at 996 * least tp->reordering SACK blocks reside between ack_seq 997 * and received_upto is not easy task to do cheaply with 998 * the available datastructures. 999 * 1000 * Whether FACK should check here for tp->reordering segs 1001 * in-between one could argue for either way (it would be 1002 * rather simple to implement as we could count fack_count 1003 * during the walk and do tp->fackets_out - fack_count). 1004 */ 1005 if (after(received_upto, ack_seq)) { 1006 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1007 tp->retrans_out -= tcp_skb_pcount(skb); 1008 1009 tcp_skb_mark_lost_uncond_verify(tp, skb); 1010 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 1011 } else { 1012 if (before(ack_seq, new_low_seq)) 1013 new_low_seq = ack_seq; 1014 cnt += tcp_skb_pcount(skb); 1015 } 1016 } 1017 1018 if (tp->retrans_out) 1019 tp->lost_retrans_low = new_low_seq; 1020 } 1021 1022 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, 1023 struct tcp_sack_block_wire *sp, int num_sacks, 1024 u32 prior_snd_una) 1025 { 1026 struct tcp_sock *tp = tcp_sk(sk); 1027 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); 1028 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); 1029 bool dup_sack = false; 1030 1031 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { 1032 dup_sack = true; 1033 tcp_dsack_seen(tp); 1034 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); 1035 } else if (num_sacks > 1) { 1036 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); 1037 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); 1038 1039 if (!after(end_seq_0, end_seq_1) && 1040 !before(start_seq_0, start_seq_1)) { 1041 dup_sack = true; 1042 tcp_dsack_seen(tp); 1043 NET_INC_STATS_BH(sock_net(sk), 1044 LINUX_MIB_TCPDSACKOFORECV); 1045 } 1046 } 1047 1048 /* D-SACK for already forgotten data... Do dumb counting. */ 1049 if (dup_sack && tp->undo_marker && tp->undo_retrans && 1050 !after(end_seq_0, prior_snd_una) && 1051 after(end_seq_0, tp->undo_marker)) 1052 tp->undo_retrans--; 1053 1054 return dup_sack; 1055 } 1056 1057 struct tcp_sacktag_state { 1058 int reord; 1059 int fack_count; 1060 int flag; 1061 }; 1062 1063 /* Check if skb is fully within the SACK block. In presence of GSO skbs, 1064 * the incoming SACK may not exactly match but we can find smaller MSS 1065 * aligned portion of it that matches. Therefore we might need to fragment 1066 * which may fail and creates some hassle (caller must handle error case 1067 * returns). 
1068 * 1069 * FIXME: this could be merged to shift decision code 1070 */ 1071 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1072 u32 start_seq, u32 end_seq) 1073 { 1074 int err; 1075 bool in_sack; 1076 unsigned int pkt_len; 1077 unsigned int mss; 1078 1079 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1080 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1081 1082 if (tcp_skb_pcount(skb) > 1 && !in_sack && 1083 after(TCP_SKB_CB(skb)->end_seq, start_seq)) { 1084 mss = tcp_skb_mss(skb); 1085 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1086 1087 if (!in_sack) { 1088 pkt_len = start_seq - TCP_SKB_CB(skb)->seq; 1089 if (pkt_len < mss) 1090 pkt_len = mss; 1091 } else { 1092 pkt_len = end_seq - TCP_SKB_CB(skb)->seq; 1093 if (pkt_len < mss) 1094 return -EINVAL; 1095 } 1096 1097 /* Round if necessary so that SACKs cover only full MSSes 1098 * and/or the remaining small portion (if present) 1099 */ 1100 if (pkt_len > mss) { 1101 unsigned int new_len = (pkt_len / mss) * mss; 1102 if (!in_sack && new_len < pkt_len) { 1103 new_len += mss; 1104 if (new_len > skb->len) 1105 return 0; 1106 } 1107 pkt_len = new_len; 1108 } 1109 err = tcp_fragment(sk, skb, pkt_len, mss); 1110 if (err < 0) 1111 return err; 1112 } 1113 1114 return in_sack; 1115 } 1116 1117 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */ 1118 static u8 tcp_sacktag_one(struct sock *sk, 1119 struct tcp_sacktag_state *state, u8 sacked, 1120 u32 start_seq, u32 end_seq, 1121 bool dup_sack, int pcount) 1122 { 1123 struct tcp_sock *tp = tcp_sk(sk); 1124 int fack_count = state->fack_count; 1125 1126 /* Account D-SACK for retransmitted packet. */ 1127 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1128 if (tp->undo_marker && tp->undo_retrans && 1129 after(end_seq, tp->undo_marker)) 1130 tp->undo_retrans--; 1131 if (sacked & TCPCB_SACKED_ACKED) 1132 state->reord = min(fack_count, state->reord); 1133 } 1134 1135 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1136 if (!after(end_seq, tp->snd_una)) 1137 return sacked; 1138 1139 if (!(sacked & TCPCB_SACKED_ACKED)) { 1140 if (sacked & TCPCB_SACKED_RETRANS) { 1141 /* If the segment is not tagged as lost, 1142 * we do not clear RETRANS, believing 1143 * that retransmission is still in flight. 1144 */ 1145 if (sacked & TCPCB_LOST) { 1146 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1147 tp->lost_out -= pcount; 1148 tp->retrans_out -= pcount; 1149 } 1150 } else { 1151 if (!(sacked & TCPCB_RETRANS)) { 1152 /* New sack for not retransmitted frame, 1153 * which was in hole. It is reordering. 1154 */ 1155 if (before(start_seq, 1156 tcp_highest_sack_seq(tp))) 1157 state->reord = min(fack_count, 1158 state->reord); 1159 if (!after(end_seq, tp->high_seq)) 1160 state->flag |= FLAG_ORIG_SACK_ACKED; 1161 } 1162 1163 if (sacked & TCPCB_LOST) { 1164 sacked &= ~TCPCB_LOST; 1165 tp->lost_out -= pcount; 1166 } 1167 } 1168 1169 sacked |= TCPCB_SACKED_ACKED; 1170 state->flag |= FLAG_DATA_SACKED; 1171 tp->sacked_out += pcount; 1172 1173 fack_count += pcount; 1174 1175 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1176 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1177 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) 1178 tp->lost_cnt_hint += pcount; 1179 1180 if (fack_count > tp->fackets_out) 1181 tp->fackets_out = fack_count; 1182 } 1183 1184 /* D-SACK. We can detect redundant retransmission in S|R and plain R 1185 * frames and clear it. undo_retrans is decreased above, L|R frames 1186 * are accounted above as well. 
1187 */ 1188 if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { 1189 sacked &= ~TCPCB_SACKED_RETRANS; 1190 tp->retrans_out -= pcount; 1191 } 1192 1193 return sacked; 1194 } 1195 1196 /* Shift newly-SACKed bytes from this skb to the immediately previous 1197 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 1198 */ 1199 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, 1200 struct tcp_sacktag_state *state, 1201 unsigned int pcount, int shifted, int mss, 1202 bool dup_sack) 1203 { 1204 struct tcp_sock *tp = tcp_sk(sk); 1205 struct sk_buff *prev = tcp_write_queue_prev(sk, skb); 1206 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ 1207 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ 1208 1209 BUG_ON(!pcount); 1210 1211 /* Adjust counters and hints for the newly sacked sequence 1212 * range but discard the return value since prev is already 1213 * marked. We must tag the range first because the seq 1214 * advancement below implicitly advances 1215 * tcp_highest_sack_seq() when skb is highest_sack. 1216 */ 1217 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, 1218 start_seq, end_seq, dup_sack, pcount); 1219 1220 if (skb == tp->lost_skb_hint) 1221 tp->lost_cnt_hint += pcount; 1222 1223 TCP_SKB_CB(prev)->end_seq += shifted; 1224 TCP_SKB_CB(skb)->seq += shifted; 1225 1226 skb_shinfo(prev)->gso_segs += pcount; 1227 BUG_ON(skb_shinfo(skb)->gso_segs < pcount); 1228 skb_shinfo(skb)->gso_segs -= pcount; 1229 1230 /* When we're adding to gso_segs == 1, gso_size will be zero, 1231 * in theory this shouldn't be necessary but as long as DSACK 1232 * code can come after this skb later on it's better to keep 1233 * setting gso_size to something. 1234 */ 1235 if (!skb_shinfo(prev)->gso_size) { 1236 skb_shinfo(prev)->gso_size = mss; 1237 skb_shinfo(prev)->gso_type = sk->sk_gso_type; 1238 } 1239 1240 /* CHECKME: To clear or not to clear? Mimics normal skb currently */ 1241 if (skb_shinfo(skb)->gso_segs <= 1) { 1242 skb_shinfo(skb)->gso_size = 0; 1243 skb_shinfo(skb)->gso_type = 0; 1244 } 1245 1246 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1247 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1248 1249 if (skb->len > 0) { 1250 BUG_ON(!tcp_skb_pcount(skb)); 1251 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED); 1252 return false; 1253 } 1254 1255 /* Whole SKB was eaten :-) */ 1256 1257 if (skb == tp->retransmit_skb_hint) 1258 tp->retransmit_skb_hint = prev; 1259 if (skb == tp->scoreboard_skb_hint) 1260 tp->scoreboard_skb_hint = prev; 1261 if (skb == tp->lost_skb_hint) { 1262 tp->lost_skb_hint = prev; 1263 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1264 } 1265 1266 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; 1267 if (skb == tcp_highest_sack(sk)) 1268 tcp_advance_highest_sack(sk, skb); 1269 1270 tcp_unlink_write_queue(skb, sk); 1271 sk_wmem_free_skb(sk, skb); 1272 1273 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED); 1274 1275 return true; 1276 } 1277 1278 /* I wish gso_size would have a bit more sane initialization than 1279 * something-or-zero which complicates things 1280 */ 1281 static int tcp_skb_seglen(const struct sk_buff *skb) 1282 { 1283 return tcp_skb_pcount(skb) == 1 ? 
skb->len : tcp_skb_mss(skb); 1284 } 1285 1286 /* Shifting pages past head area doesn't work */ 1287 static int skb_can_shift(const struct sk_buff *skb) 1288 { 1289 return !skb_headlen(skb) && skb_is_nonlinear(skb); 1290 } 1291 1292 /* Try collapsing SACK blocks spanning across multiple skbs to a single 1293 * skb. 1294 */ 1295 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, 1296 struct tcp_sacktag_state *state, 1297 u32 start_seq, u32 end_seq, 1298 bool dup_sack) 1299 { 1300 struct tcp_sock *tp = tcp_sk(sk); 1301 struct sk_buff *prev; 1302 int mss; 1303 int pcount = 0; 1304 int len; 1305 int in_sack; 1306 1307 if (!sk_can_gso(sk)) 1308 goto fallback; 1309 1310 /* Normally R but no L won't result in plain S */ 1311 if (!dup_sack && 1312 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) 1313 goto fallback; 1314 if (!skb_can_shift(skb)) 1315 goto fallback; 1316 /* This frame is about to be dropped (was ACKed). */ 1317 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1318 goto fallback; 1319 1320 /* Can only happen with delayed DSACK + discard craziness */ 1321 if (unlikely(skb == tcp_write_queue_head(sk))) 1322 goto fallback; 1323 prev = tcp_write_queue_prev(sk, skb); 1324 1325 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) 1326 goto fallback; 1327 1328 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1329 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1330 1331 if (in_sack) { 1332 len = skb->len; 1333 pcount = tcp_skb_pcount(skb); 1334 mss = tcp_skb_seglen(skb); 1335 1336 /* TODO: Fix DSACKs to not fragment already SACKed and we can 1337 * drop this restriction as unnecessary 1338 */ 1339 if (mss != tcp_skb_seglen(prev)) 1340 goto fallback; 1341 } else { 1342 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) 1343 goto noop; 1344 /* CHECKME: This is non-MSS split case only?, this will 1345 * cause skipped skbs due to advancing loop btw, original 1346 * has that feature too 1347 */ 1348 if (tcp_skb_pcount(skb) <= 1) 1349 goto noop; 1350 1351 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1352 if (!in_sack) { 1353 /* TODO: head merge to next could be attempted here 1354 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), 1355 * though it might not be worth of the additional hassle 1356 * 1357 * ...we can probably just fallback to what was done 1358 * previously. We could try merging non-SACKed ones 1359 * as well but it probably isn't going to buy off 1360 * because later SACKs might again split them, and 1361 * it would make skb timestamp tracking considerably 1362 * harder problem. 1363 */ 1364 goto fallback; 1365 } 1366 1367 len = end_seq - TCP_SKB_CB(skb)->seq; 1368 BUG_ON(len < 0); 1369 BUG_ON(len > skb->len); 1370 1371 /* MSS boundaries should be honoured or else pcount will 1372 * severely break even though it makes things bit trickier. 
1373 * Optimize common case to avoid most of the divides 1374 */ 1375 mss = tcp_skb_mss(skb); 1376 1377 /* TODO: Fix DSACKs to not fragment already SACKed and we can 1378 * drop this restriction as unnecessary 1379 */ 1380 if (mss != tcp_skb_seglen(prev)) 1381 goto fallback; 1382 1383 if (len == mss) { 1384 pcount = 1; 1385 } else if (len < mss) { 1386 goto noop; 1387 } else { 1388 pcount = len / mss; 1389 len = pcount * mss; 1390 } 1391 } 1392 1393 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ 1394 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) 1395 goto fallback; 1396 1397 if (!skb_shift(prev, skb, len)) 1398 goto fallback; 1399 if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) 1400 goto out; 1401 1402 /* Hole filled allows collapsing with the next as well, this is very 1403 * useful when hole on every nth skb pattern happens 1404 */ 1405 if (prev == tcp_write_queue_tail(sk)) 1406 goto out; 1407 skb = tcp_write_queue_next(sk, prev); 1408 1409 if (!skb_can_shift(skb) || 1410 (skb == tcp_send_head(sk)) || 1411 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || 1412 (mss != tcp_skb_seglen(skb))) 1413 goto out; 1414 1415 len = skb->len; 1416 if (skb_shift(prev, skb, len)) { 1417 pcount += tcp_skb_pcount(skb); 1418 tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); 1419 } 1420 1421 out: 1422 state->fack_count += pcount; 1423 return prev; 1424 1425 noop: 1426 return skb; 1427 1428 fallback: 1429 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); 1430 return NULL; 1431 } 1432 1433 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, 1434 struct tcp_sack_block *next_dup, 1435 struct tcp_sacktag_state *state, 1436 u32 start_seq, u32 end_seq, 1437 bool dup_sack_in) 1438 { 1439 struct tcp_sock *tp = tcp_sk(sk); 1440 struct sk_buff *tmp; 1441 1442 tcp_for_write_queue_from(skb, sk) { 1443 int in_sack = 0; 1444 bool dup_sack = dup_sack_in; 1445 1446 if (skb == tcp_send_head(sk)) 1447 break; 1448 1449 /* queue is in-order => we can short-circuit the walk early */ 1450 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 1451 break; 1452 1453 if ((next_dup != NULL) && 1454 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { 1455 in_sack = tcp_match_skb_to_sack(sk, skb, 1456 next_dup->start_seq, 1457 next_dup->end_seq); 1458 if (in_sack > 0) 1459 dup_sack = true; 1460 } 1461 1462 /* skb reference here is a bit tricky to get right, since 1463 * shifting can eat and free both this skb and the next, 1464 * so not even _safe variant of the loop is enough. 
1465 */ 1466 if (in_sack <= 0) { 1467 tmp = tcp_shift_skb_data(sk, skb, state, 1468 start_seq, end_seq, dup_sack); 1469 if (tmp != NULL) { 1470 if (tmp != skb) { 1471 skb = tmp; 1472 continue; 1473 } 1474 1475 in_sack = 0; 1476 } else { 1477 in_sack = tcp_match_skb_to_sack(sk, skb, 1478 start_seq, 1479 end_seq); 1480 } 1481 } 1482 1483 if (unlikely(in_sack < 0)) 1484 break; 1485 1486 if (in_sack) { 1487 TCP_SKB_CB(skb)->sacked = 1488 tcp_sacktag_one(sk, 1489 state, 1490 TCP_SKB_CB(skb)->sacked, 1491 TCP_SKB_CB(skb)->seq, 1492 TCP_SKB_CB(skb)->end_seq, 1493 dup_sack, 1494 tcp_skb_pcount(skb)); 1495 1496 if (!before(TCP_SKB_CB(skb)->seq, 1497 tcp_highest_sack_seq(tp))) 1498 tcp_advance_highest_sack(sk, skb); 1499 } 1500 1501 state->fack_count += tcp_skb_pcount(skb); 1502 } 1503 return skb; 1504 } 1505 1506 /* Avoid all extra work that is being done by sacktag while walking in 1507 * a normal way 1508 */ 1509 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1510 struct tcp_sacktag_state *state, 1511 u32 skip_to_seq) 1512 { 1513 tcp_for_write_queue_from(skb, sk) { 1514 if (skb == tcp_send_head(sk)) 1515 break; 1516 1517 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 1518 break; 1519 1520 state->fack_count += tcp_skb_pcount(skb); 1521 } 1522 return skb; 1523 } 1524 1525 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1526 struct sock *sk, 1527 struct tcp_sack_block *next_dup, 1528 struct tcp_sacktag_state *state, 1529 u32 skip_to_seq) 1530 { 1531 if (next_dup == NULL) 1532 return skb; 1533 1534 if (before(next_dup->start_seq, skip_to_seq)) { 1535 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); 1536 skb = tcp_sacktag_walk(skb, sk, NULL, state, 1537 next_dup->start_seq, next_dup->end_seq, 1538 1); 1539 } 1540 1541 return skb; 1542 } 1543 1544 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1545 { 1546 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1547 } 1548 1549 static int 1550 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1551 u32 prior_snd_una) 1552 { 1553 struct tcp_sock *tp = tcp_sk(sk); 1554 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1555 TCP_SKB_CB(ack_skb)->sacked); 1556 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1557 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1558 struct tcp_sack_block *cache; 1559 struct tcp_sacktag_state state; 1560 struct sk_buff *skb; 1561 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1562 int used_sacks; 1563 bool found_dup_sack = false; 1564 int i, j; 1565 int first_sack_index; 1566 1567 state.flag = 0; 1568 state.reord = tp->packets_out; 1569 1570 if (!tp->sacked_out) { 1571 if (WARN_ON(tp->fackets_out)) 1572 tp->fackets_out = 0; 1573 tcp_highest_sack_reset(sk); 1574 } 1575 1576 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1577 num_sacks, prior_snd_una); 1578 if (found_dup_sack) 1579 state.flag |= FLAG_DSACKING_ACK; 1580 1581 /* Eliminate too old ACKs, but take into 1582 * account more or less fresh ones, they can 1583 * contain valid SACK info. 
1584 */ 1585 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1586 return 0; 1587 1588 if (!tp->packets_out) 1589 goto out; 1590 1591 used_sacks = 0; 1592 first_sack_index = 0; 1593 for (i = 0; i < num_sacks; i++) { 1594 bool dup_sack = !i && found_dup_sack; 1595 1596 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1597 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1598 1599 if (!tcp_is_sackblock_valid(tp, dup_sack, 1600 sp[used_sacks].start_seq, 1601 sp[used_sacks].end_seq)) { 1602 int mib_idx; 1603 1604 if (dup_sack) { 1605 if (!tp->undo_marker) 1606 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; 1607 else 1608 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; 1609 } else { 1610 /* Don't count olds caused by ACK reordering */ 1611 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1612 !after(sp[used_sacks].end_seq, tp->snd_una)) 1613 continue; 1614 mib_idx = LINUX_MIB_TCPSACKDISCARD; 1615 } 1616 1617 NET_INC_STATS_BH(sock_net(sk), mib_idx); 1618 if (i == 0) 1619 first_sack_index = -1; 1620 continue; 1621 } 1622 1623 /* Ignore very old stuff early */ 1624 if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1625 continue; 1626 1627 used_sacks++; 1628 } 1629 1630 /* order SACK blocks to allow in order walk of the retrans queue */ 1631 for (i = used_sacks - 1; i > 0; i--) { 1632 for (j = 0; j < i; j++) { 1633 if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1634 swap(sp[j], sp[j + 1]); 1635 1636 /* Track where the first SACK block goes to */ 1637 if (j == first_sack_index) 1638 first_sack_index = j + 1; 1639 } 1640 } 1641 } 1642 1643 skb = tcp_write_queue_head(sk); 1644 state.fack_count = 0; 1645 i = 0; 1646 1647 if (!tp->sacked_out) { 1648 /* It's already past, so skip checking against it */ 1649 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1650 } else { 1651 cache = tp->recv_sack_cache; 1652 /* Skip empty blocks in at head of the cache */ 1653 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 1654 !cache->end_seq) 1655 cache++; 1656 } 1657 1658 while (i < used_sacks) { 1659 u32 start_seq = sp[i].start_seq; 1660 u32 end_seq = sp[i].end_seq; 1661 bool dup_sack = (found_dup_sack && (i == first_sack_index)); 1662 struct tcp_sack_block *next_dup = NULL; 1663 1664 if (found_dup_sack && ((i + 1) == first_sack_index)) 1665 next_dup = &sp[i + 1]; 1666 1667 /* Skip too early cached blocks */ 1668 while (tcp_sack_cache_ok(tp, cache) && 1669 !before(start_seq, cache->end_seq)) 1670 cache++; 1671 1672 /* Can skip some work by looking recv_sack_cache? */ 1673 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 1674 after(end_seq, cache->start_seq)) { 1675 1676 /* Head todo? */ 1677 if (before(start_seq, cache->start_seq)) { 1678 skb = tcp_sacktag_skip(skb, sk, &state, 1679 start_seq); 1680 skb = tcp_sacktag_walk(skb, sk, next_dup, 1681 &state, 1682 start_seq, 1683 cache->start_seq, 1684 dup_sack); 1685 } 1686 1687 /* Rest of the block already fully processed? */ 1688 if (!after(end_seq, cache->end_seq)) 1689 goto advance_sp; 1690 1691 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1692 &state, 1693 cache->end_seq); 1694 1695 /* ...tail remains todo... */ 1696 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1697 /* ...but better entrypoint exists! 
				 */
				skb = tcp_highest_sack(sk);
				if (skb == NULL)
					break;
				state.fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (skb == NULL)
				break;
			state.fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	tcp_mark_lost_retrans(sk);

	tcp_verify_left_out(tp);

	if ((state.reord < tp->fackets_out) &&
	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);

out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state.flag;
}

/* Limits sacked_out so that its sum with lost_out isn't ever larger than
 * packets_out. Returns false if no sacked_out adjustment was necessary.
 */
static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
{
	u32 holes;

	holes = max(tp->lost_out, 1U);
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return true;
	}
	return false;
}

/* If we receive more dupacks than we expected counting segments
 * under the assumption of absent reordering, interpret this as reordering.
 * The only other possible reason would be a bug in the receiver TCP.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tcp_limit_reno_sacked(tp))
		tcp_update_reordering(sk, tp->packets_out + addend, 0);
}

/* Emulate SACKs for a SACKless connection: account for a new dupack. */

static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	tp->sacked_out++;
	tcp_check_reno_reordering(sk, 0);
	tcp_verify_left_out(tp);
}

/* Account for an ACK, ACKing some data in the Reno Recovery phase. */

static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (acked > 0) {
		/* One ACK acked the hole. The rest eat duplicate ACKs.
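		 *
		 * For example (illustrative numbers only): with sacked_out == 3
		 * emulated dupacks and a cumulative ACK covering two segments
		 * (acked == 2), one segment fills the hole and the other
		 * consumes one counted dupack, leaving sacked_out == 2.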
*/ 1801 if (acked - 1 >= tp->sacked_out) 1802 tp->sacked_out = 0; 1803 else 1804 tp->sacked_out -= acked - 1; 1805 } 1806 tcp_check_reno_reordering(sk, acked); 1807 tcp_verify_left_out(tp); 1808 } 1809 1810 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 1811 { 1812 tp->sacked_out = 0; 1813 } 1814 1815 static void tcp_clear_retrans_partial(struct tcp_sock *tp) 1816 { 1817 tp->retrans_out = 0; 1818 tp->lost_out = 0; 1819 1820 tp->undo_marker = 0; 1821 tp->undo_retrans = 0; 1822 } 1823 1824 void tcp_clear_retrans(struct tcp_sock *tp) 1825 { 1826 tcp_clear_retrans_partial(tp); 1827 1828 tp->fackets_out = 0; 1829 tp->sacked_out = 0; 1830 } 1831 1832 /* Enter Loss state. If "how" is not zero, forget all SACK information 1833 * and reset tags completely, otherwise preserve SACKs. If receiver 1834 * dropped its ofo queue, we will know this due to reneging detection. 1835 */ 1836 void tcp_enter_loss(struct sock *sk, int how) 1837 { 1838 const struct inet_connection_sock *icsk = inet_csk(sk); 1839 struct tcp_sock *tp = tcp_sk(sk); 1840 struct sk_buff *skb; 1841 bool new_recovery = false; 1842 1843 /* Reduce ssthresh if it has not yet been made inside this window. */ 1844 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1845 !after(tp->high_seq, tp->snd_una) || 1846 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1847 new_recovery = true; 1848 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1849 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1850 tcp_ca_event(sk, CA_EVENT_LOSS); 1851 } 1852 tp->snd_cwnd = 1; 1853 tp->snd_cwnd_cnt = 0; 1854 tp->snd_cwnd_stamp = tcp_time_stamp; 1855 1856 tcp_clear_retrans_partial(tp); 1857 1858 if (tcp_is_reno(tp)) 1859 tcp_reset_reno_sack(tp); 1860 1861 tp->undo_marker = tp->snd_una; 1862 if (how) { 1863 tp->sacked_out = 0; 1864 tp->fackets_out = 0; 1865 } 1866 tcp_clear_all_retrans_hints(tp); 1867 1868 tcp_for_write_queue(skb, sk) { 1869 if (skb == tcp_send_head(sk)) 1870 break; 1871 1872 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1873 tp->undo_marker = 0; 1874 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 1875 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 1876 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1877 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1878 tp->lost_out += tcp_skb_pcount(skb); 1879 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 1880 } 1881 } 1882 tcp_verify_left_out(tp); 1883 1884 tp->reordering = min_t(unsigned int, tp->reordering, 1885 sysctl_tcp_reordering); 1886 tcp_set_ca_state(sk, TCP_CA_Loss); 1887 tp->high_seq = tp->snd_nxt; 1888 TCP_ECN_queue_cwr(tp); 1889 1890 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous 1891 * loss recovery is underway except recurring timeout(s) on 1892 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing 1893 */ 1894 tp->frto = sysctl_tcp_frto && 1895 (new_recovery || icsk->icsk_retransmits) && 1896 !inet_csk(sk)->icsk_mtup.probe_size; 1897 } 1898 1899 /* If ACK arrived pointing to a remembered SACK, it means that our 1900 * remembered SACKs do not reflect real state of receiver i.e. 1901 * receiver _host_ is heavily congested (or buggy). 1902 * 1903 * Do processing similar to RTO timeout. 
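 * In practice (summarising the helper below) that means: enter the Loss
 * state with the remembered SACK scoreboard discarded, retransmit the head
 * of the write queue and re-arm the retransmit timer, just as an RTO would.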
1904 */ 1905 static bool tcp_check_sack_reneging(struct sock *sk, int flag) 1906 { 1907 if (flag & FLAG_SACK_RENEGING) { 1908 struct inet_connection_sock *icsk = inet_csk(sk); 1909 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 1910 1911 tcp_enter_loss(sk, 1); 1912 icsk->icsk_retransmits++; 1913 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 1914 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 1915 icsk->icsk_rto, TCP_RTO_MAX); 1916 return true; 1917 } 1918 return false; 1919 } 1920 1921 static inline int tcp_fackets_out(const struct tcp_sock *tp) 1922 { 1923 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 1924 } 1925 1926 /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs 1927 * counter when SACK is enabled (without SACK, sacked_out is used for 1928 * that purpose). 1929 * 1930 * Instead, with FACK TCP uses fackets_out that includes both SACKed 1931 * segments up to the highest received SACK block so far and holes in 1932 * between them. 1933 * 1934 * With reordering, holes may still be in flight, so RFC3517 recovery 1935 * uses pure sacked_out (total number of SACKed segments) even though 1936 * it violates the RFC that uses duplicate ACKs, often these are equal 1937 * but when e.g. out-of-window ACKs or packet duplication occurs, 1938 * they differ. Since neither occurs due to loss, TCP should really 1939 * ignore them. 1940 */ 1941 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) 1942 { 1943 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 1944 } 1945 1946 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) 1947 { 1948 struct tcp_sock *tp = tcp_sk(sk); 1949 unsigned long delay; 1950 1951 /* Delay early retransmit and entering fast recovery for 1952 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples 1953 * available, or RTO is scheduled to fire first. 1954 */ 1955 if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || 1956 (flag & FLAG_ECE) || !tp->srtt) 1957 return false; 1958 1959 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); 1960 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) 1961 return false; 1962 1963 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, 1964 TCP_RTO_MAX); 1965 return true; 1966 } 1967 1968 static inline int tcp_skb_timedout(const struct sock *sk, 1969 const struct sk_buff *skb) 1970 { 1971 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; 1972 } 1973 1974 static inline int tcp_head_timedout(const struct sock *sk) 1975 { 1976 const struct tcp_sock *tp = tcp_sk(sk); 1977 1978 return tp->packets_out && 1979 tcp_skb_timedout(sk, tcp_write_queue_head(sk)); 1980 } 1981 1982 /* Linux NewReno/SACK/FACK/ECN state machine. 1983 * -------------------------------------- 1984 * 1985 * "Open" Normal state, no dubious events, fast path. 1986 * "Disorder" In all the respects it is "Open", 1987 * but requires a bit more attention. It is entered when 1988 * we see some SACKs or dupacks. It is split of "Open" 1989 * mainly to move some processing from fast path to slow one. 1990 * "CWR" CWND was reduced due to some Congestion Notification event. 1991 * It can be ECN, ICMP source quench, local device congestion. 1992 * "Recovery" CWND was reduced, we are fast-retransmitting. 1993 * "Loss" CWND was reduced due to RTO timeout or SACK reneging. 
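 *
 * A typical SACK episode (illustrative): the sender starts in "Open",
 * moves to "Disorder" when the first SACK block arrives, enters "Recovery"
 * once the dupack/SACK heuristics exceed tp->reordering, fast-retransmits
 * the missing segment, and returns to "Open" when snd_una advances past
 * high_seq.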
1994 * 1995 * tcp_fastretrans_alert() is entered: 1996 * - each incoming ACK, if state is not "Open" 1997 * - when arrived ACK is unusual, namely: 1998 * * SACK 1999 * * Duplicate ACK. 2000 * * ECN ECE. 2001 * 2002 * Counting packets in flight is pretty simple. 2003 * 2004 * in_flight = packets_out - left_out + retrans_out 2005 * 2006 * packets_out is SND.NXT-SND.UNA counted in packets. 2007 * 2008 * retrans_out is number of retransmitted segments. 2009 * 2010 * left_out is number of segments left network, but not ACKed yet. 2011 * 2012 * left_out = sacked_out + lost_out 2013 * 2014 * sacked_out: Packets, which arrived to receiver out of order 2015 * and hence not ACKed. With SACKs this number is simply 2016 * amount of SACKed data. Even without SACKs 2017 * it is easy to give pretty reliable estimate of this number, 2018 * counting duplicate ACKs. 2019 * 2020 * lost_out: Packets lost by network. TCP has no explicit 2021 * "loss notification" feedback from network (for now). 2022 * It means that this number can be only _guessed_. 2023 * Actually, it is the heuristics to predict lossage that 2024 * distinguishes different algorithms. 2025 * 2026 * F.e. after RTO, when all the queue is considered as lost, 2027 * lost_out = packets_out and in_flight = retrans_out. 2028 * 2029 * Essentially, we have now two algorithms counting 2030 * lost packets. 2031 * 2032 * FACK: It is the simplest heuristics. As soon as we decided 2033 * that something is lost, we decide that _all_ not SACKed 2034 * packets until the most forward SACK are lost. I.e. 2035 * lost_out = fackets_out - sacked_out and left_out = fackets_out. 2036 * It is absolutely correct estimate, if network does not reorder 2037 * packets. And it loses any connection to reality when reordering 2038 * takes place. We use FACK by default until reordering 2039 * is suspected on the path to this destination. 2040 * 2041 * NewReno: when Recovery is entered, we assume that one segment 2042 * is lost (classic Reno). While we are in Recovery and 2043 * a partial ACK arrives, we assume that one more packet 2044 * is lost (NewReno). This heuristics are the same in NewReno 2045 * and SACK. 2046 * 2047 * Imagine, that's all! Forget about all this shamanism about CWND inflation 2048 * deflation etc. CWND is real congestion window, never inflated, changes 2049 * only according to classic VJ rules. 2050 * 2051 * Really tricky (and requiring careful tuning) part of algorithm 2052 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). 2053 * The first determines the moment _when_ we should reduce CWND and, 2054 * hence, slow down forward transmission. In fact, it determines the moment 2055 * when we decide that hole is caused by loss, rather than by a reorder. 2056 * 2057 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill 2058 * holes, caused by lost packets. 2059 * 2060 * And the most logically complicated part of algorithm is undo 2061 * heuristics. We detect false retransmits due to both too early 2062 * fast retransmit (reordering) and underestimated RTO, analyzing 2063 * timestamps and D-SACKs. When we detect that some segments were 2064 * retransmitted by mistake and CWND reduction was wrong, we undo 2065 * window reduction and abort recovery phase. This logic is hidden 2066 * inside several functions named tcp_try_undo_<something>. 2067 */ 2068 2069 /* This function decides, when we should leave Disordered state 2070 * and enter Recovery phase, reducing congestion window. 
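 * (Illustrative: for a non-FACK flow with the default tp->reordering of 3,
 * the classic rule below fires on the third duplicate ACK, since then
 * tcp_dupack_heuristics() == sacked_out + 1 == 4 > 3, matching the RFC 5681
 * fast retransmit threshold.)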
2071 * 2072 * Main question: may we further continue forward transmission 2073 * with the same cwnd? 2074 */ 2075 static bool tcp_time_to_recover(struct sock *sk, int flag) 2076 { 2077 struct tcp_sock *tp = tcp_sk(sk); 2078 __u32 packets_out; 2079 2080 /* Trick#1: The loss is proven. */ 2081 if (tp->lost_out) 2082 return true; 2083 2084 /* Not-A-Trick#2 : Classic rule... */ 2085 if (tcp_dupack_heuristics(tp) > tp->reordering) 2086 return true; 2087 2088 /* Trick#3 : when we use RFC2988 timer restart, fast 2089 * retransmit can be triggered by timeout of queue head. 2090 */ 2091 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 2092 return true; 2093 2094 /* Trick#4: It is still not OK... But will it be useful to delay 2095 * recovery more? 2096 */ 2097 packets_out = tp->packets_out; 2098 if (packets_out <= tp->reordering && 2099 tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 2100 !tcp_may_send_now(sk)) { 2101 /* We have nothing to send. This connection is limited 2102 * either by receiver window or by application. 2103 */ 2104 return true; 2105 } 2106 2107 /* If a thin stream is detected, retransmit after first 2108 * received dupack. Employ only if SACK is supported in order 2109 * to avoid possible corner-case series of spurious retransmissions 2110 * Use only if there are no unsent data. 2111 */ 2112 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && 2113 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && 2114 tcp_is_sack(tp) && !tcp_send_head(sk)) 2115 return true; 2116 2117 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious 2118 * retransmissions due to small network reorderings, we implement 2119 * Mitigation A.3 in the RFC and delay the retransmission for a short 2120 * interval if appropriate. 2121 */ 2122 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && 2123 (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && 2124 !tcp_may_send_now(sk)) 2125 return !tcp_pause_early_retransmit(sk, flag); 2126 2127 return false; 2128 } 2129 2130 /* New heuristics: it is possible only after we switched to restart timer 2131 * each time when something is ACKed. Hence, we can detect timed out packets 2132 * during fast retransmit without falling to slow start. 2133 * 2134 * Usefulness of this as is very questionable, since we should know which of 2135 * the segments is the next to timeout which is relatively expensive to find 2136 * in general case unless we add some data structure just for that. The 2137 * current approach certainly won't find the right one too often and when it 2138 * finally does find _something_ it usually marks large part of the window 2139 * right away (because a retransmission with a larger timestamp blocks the 2140 * loop from advancing). -ij 2141 */ 2142 static void tcp_timeout_skbs(struct sock *sk) 2143 { 2144 struct tcp_sock *tp = tcp_sk(sk); 2145 struct sk_buff *skb; 2146 2147 if (!tcp_is_fack(tp) || !tcp_head_timedout(sk)) 2148 return; 2149 2150 skb = tp->scoreboard_skb_hint; 2151 if (tp->scoreboard_skb_hint == NULL) 2152 skb = tcp_write_queue_head(sk); 2153 2154 tcp_for_write_queue_from(skb, sk) { 2155 if (skb == tcp_send_head(sk)) 2156 break; 2157 if (!tcp_skb_timedout(sk, skb)) 2158 break; 2159 2160 tcp_skb_mark_lost(tp, skb); 2161 } 2162 2163 tp->scoreboard_skb_hint = skb; 2164 2165 tcp_verify_left_out(tp); 2166 } 2167 2168 /* Detect loss in event "A" above by marking head of queue up as lost. 
2169 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments 2170 * are considered lost. For RFC3517 SACK, a segment is considered lost if it 2171 * has at least tp->reordering SACKed seqments above it; "packets" refers to 2172 * the maximum SACKed segments to pass before reaching this limit. 2173 */ 2174 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) 2175 { 2176 struct tcp_sock *tp = tcp_sk(sk); 2177 struct sk_buff *skb; 2178 int cnt, oldcnt; 2179 int err; 2180 unsigned int mss; 2181 /* Use SACK to deduce losses of new sequences sent during recovery */ 2182 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; 2183 2184 WARN_ON(packets > tp->packets_out); 2185 if (tp->lost_skb_hint) { 2186 skb = tp->lost_skb_hint; 2187 cnt = tp->lost_cnt_hint; 2188 /* Head already handled? */ 2189 if (mark_head && skb != tcp_write_queue_head(sk)) 2190 return; 2191 } else { 2192 skb = tcp_write_queue_head(sk); 2193 cnt = 0; 2194 } 2195 2196 tcp_for_write_queue_from(skb, sk) { 2197 if (skb == tcp_send_head(sk)) 2198 break; 2199 /* TODO: do this better */ 2200 /* this is not the most efficient way to do this... */ 2201 tp->lost_skb_hint = skb; 2202 tp->lost_cnt_hint = cnt; 2203 2204 if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) 2205 break; 2206 2207 oldcnt = cnt; 2208 if (tcp_is_fack(tp) || tcp_is_reno(tp) || 2209 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2210 cnt += tcp_skb_pcount(skb); 2211 2212 if (cnt > packets) { 2213 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || 2214 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || 2215 (oldcnt >= packets)) 2216 break; 2217 2218 mss = skb_shinfo(skb)->gso_size; 2219 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss); 2220 if (err < 0) 2221 break; 2222 cnt = packets; 2223 } 2224 2225 tcp_skb_mark_lost(tp, skb); 2226 2227 if (mark_head) 2228 break; 2229 } 2230 tcp_verify_left_out(tp); 2231 } 2232 2233 /* Account newly detected lost packet(s) */ 2234 2235 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) 2236 { 2237 struct tcp_sock *tp = tcp_sk(sk); 2238 2239 if (tcp_is_reno(tp)) { 2240 tcp_mark_head_lost(sk, 1, 1); 2241 } else if (tcp_is_fack(tp)) { 2242 int lost = tp->fackets_out - tp->reordering; 2243 if (lost <= 0) 2244 lost = 1; 2245 tcp_mark_head_lost(sk, lost, 0); 2246 } else { 2247 int sacked_upto = tp->sacked_out - tp->reordering; 2248 if (sacked_upto >= 0) 2249 tcp_mark_head_lost(sk, sacked_upto, 0); 2250 else if (fast_rexmit) 2251 tcp_mark_head_lost(sk, 1, 1); 2252 } 2253 2254 tcp_timeout_skbs(sk); 2255 } 2256 2257 /* CWND moderation, preventing bursts due to too big ACKs 2258 * in dubious situations. 2259 */ 2260 static inline void tcp_moderate_cwnd(struct tcp_sock *tp) 2261 { 2262 tp->snd_cwnd = min(tp->snd_cwnd, 2263 tcp_packets_in_flight(tp) + tcp_max_burst(tp)); 2264 tp->snd_cwnd_stamp = tcp_time_stamp; 2265 } 2266 2267 /* Nothing was retransmitted or returned timestamp is less 2268 * than timestamp of the first retransmission. 2269 */ 2270 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) 2271 { 2272 return !tp->retrans_stamp || 2273 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2274 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); 2275 } 2276 2277 /* Undo procedures. 
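 *
 * (Illustrative summary of the helpers below: an undo is possible while
 * tp->undo_marker is set and either every retransmission has been reported
 * back as a D-SACK (undo_retrans reaches zero) or the echoed timestamp
 * shows that the original transmission, not the retransmit, is what got
 * ACKed.  In that case cwnd is re-inflated, ssthresh restored from
 * prior_ssthresh, and the bogus recovery abandoned.)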
*/ 2278 2279 #if FASTRETRANS_DEBUG > 1 2280 static void DBGUNDO(struct sock *sk, const char *msg) 2281 { 2282 struct tcp_sock *tp = tcp_sk(sk); 2283 struct inet_sock *inet = inet_sk(sk); 2284 2285 if (sk->sk_family == AF_INET) { 2286 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2287 msg, 2288 &inet->inet_daddr, ntohs(inet->inet_dport), 2289 tp->snd_cwnd, tcp_left_out(tp), 2290 tp->snd_ssthresh, tp->prior_ssthresh, 2291 tp->packets_out); 2292 } 2293 #if IS_ENABLED(CONFIG_IPV6) 2294 else if (sk->sk_family == AF_INET6) { 2295 struct ipv6_pinfo *np = inet6_sk(sk); 2296 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2297 msg, 2298 &np->daddr, ntohs(inet->inet_dport), 2299 tp->snd_cwnd, tcp_left_out(tp), 2300 tp->snd_ssthresh, tp->prior_ssthresh, 2301 tp->packets_out); 2302 } 2303 #endif 2304 } 2305 #else 2306 #define DBGUNDO(x...) do { } while (0) 2307 #endif 2308 2309 static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh) 2310 { 2311 struct tcp_sock *tp = tcp_sk(sk); 2312 2313 if (tp->prior_ssthresh) { 2314 const struct inet_connection_sock *icsk = inet_csk(sk); 2315 2316 if (icsk->icsk_ca_ops->undo_cwnd) 2317 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2318 else 2319 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 2320 2321 if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { 2322 tp->snd_ssthresh = tp->prior_ssthresh; 2323 TCP_ECN_withdraw_cwr(tp); 2324 } 2325 } else { 2326 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 2327 } 2328 tp->snd_cwnd_stamp = tcp_time_stamp; 2329 } 2330 2331 static inline bool tcp_may_undo(const struct tcp_sock *tp) 2332 { 2333 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2334 } 2335 2336 /* People celebrate: "We love our President!" */ 2337 static bool tcp_try_undo_recovery(struct sock *sk) 2338 { 2339 struct tcp_sock *tp = tcp_sk(sk); 2340 2341 if (tcp_may_undo(tp)) { 2342 int mib_idx; 2343 2344 /* Happy end! We did not retransmit anything 2345 * or our original transmission succeeded. 2346 */ 2347 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2348 tcp_undo_cwr(sk, true); 2349 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2350 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2351 else 2352 mib_idx = LINUX_MIB_TCPFULLUNDO; 2353 2354 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2355 tp->undo_marker = 0; 2356 } 2357 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 2358 /* Hold old state until something *above* high_seq 2359 * is ACKed. For Reno it is MUST to prevent false 2360 * fast retransmits (RFC2582). SACK TCP is safe. */ 2361 tcp_moderate_cwnd(tp); 2362 return true; 2363 } 2364 tcp_set_ca_state(sk, TCP_CA_Open); 2365 return false; 2366 } 2367 2368 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2369 static void tcp_try_undo_dsack(struct sock *sk) 2370 { 2371 struct tcp_sock *tp = tcp_sk(sk); 2372 2373 if (tp->undo_marker && !tp->undo_retrans) { 2374 DBGUNDO(sk, "D-SACK"); 2375 tcp_undo_cwr(sk, true); 2376 tp->undo_marker = 0; 2377 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2378 } 2379 } 2380 2381 /* We can clear retrans_stamp when there are no retransmissions in the 2382 * window. It would seem that it is trivially available for us in 2383 * tp->retrans_out, however, that kind of assumptions doesn't consider 2384 * what will happen if errors occur when sending retransmission for the 2385 * second time. ...It could the that such segment has only 2386 * TCPCB_EVER_RETRANS set at the present time. 
It seems that checking 2387 * the head skb is enough except for some reneging corner cases that 2388 * are not worth the effort. 2389 * 2390 * Main reason for all this complexity is the fact that connection dying 2391 * time now depends on the validity of the retrans_stamp, in particular, 2392 * that successive retransmissions of a segment must not advance 2393 * retrans_stamp under any conditions. 2394 */ 2395 static bool tcp_any_retrans_done(const struct sock *sk) 2396 { 2397 const struct tcp_sock *tp = tcp_sk(sk); 2398 struct sk_buff *skb; 2399 2400 if (tp->retrans_out) 2401 return true; 2402 2403 skb = tcp_write_queue_head(sk); 2404 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) 2405 return true; 2406 2407 return false; 2408 } 2409 2410 /* Undo during fast recovery after partial ACK. */ 2411 2412 static int tcp_try_undo_partial(struct sock *sk, int acked) 2413 { 2414 struct tcp_sock *tp = tcp_sk(sk); 2415 /* Partial ACK arrived. Force Hoe's retransmit. */ 2416 int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); 2417 2418 if (tcp_may_undo(tp)) { 2419 /* Plain luck! Hole if filled with delayed 2420 * packet, rather than with a retransmit. 2421 */ 2422 if (!tcp_any_retrans_done(sk)) 2423 tp->retrans_stamp = 0; 2424 2425 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2426 2427 DBGUNDO(sk, "Hoe"); 2428 tcp_undo_cwr(sk, false); 2429 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2430 2431 /* So... Do not make Hoe's retransmit yet. 2432 * If the first packet was delayed, the rest 2433 * ones are most probably delayed as well. 2434 */ 2435 failed = 0; 2436 } 2437 return failed; 2438 } 2439 2440 /* Undo during loss recovery after partial ACK or using F-RTO. */ 2441 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2442 { 2443 struct tcp_sock *tp = tcp_sk(sk); 2444 2445 if (frto_undo || tcp_may_undo(tp)) { 2446 struct sk_buff *skb; 2447 tcp_for_write_queue(skb, sk) { 2448 if (skb == tcp_send_head(sk)) 2449 break; 2450 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2451 } 2452 2453 tcp_clear_all_retrans_hints(tp); 2454 2455 DBGUNDO(sk, "partial loss"); 2456 tp->lost_out = 0; 2457 tcp_undo_cwr(sk, true); 2458 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2459 if (frto_undo) 2460 NET_INC_STATS_BH(sock_net(sk), 2461 LINUX_MIB_TCPSPURIOUSRTOS); 2462 inet_csk(sk)->icsk_retransmits = 0; 2463 tp->undo_marker = 0; 2464 if (frto_undo || tcp_is_sack(tp)) 2465 tcp_set_ca_state(sk, TCP_CA_Open); 2466 return true; 2467 } 2468 return false; 2469 } 2470 2471 /* The cwnd reduction in CWR and Recovery use the PRR algorithm 2472 * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/ 2473 * It computes the number of packets to send (sndcnt) based on packets newly 2474 * delivered: 2475 * 1) If the packets in flight is larger than ssthresh, PRR spreads the 2476 * cwnd reductions across a full RTT. 2477 * 2) If packets in flight is lower than ssthresh (such as due to excess 2478 * losses and/or application stalls), do not perform any further cwnd 2479 * reductions, but instead slow start up to ssthresh. 
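 *
 * Worked example (illustrative numbers, not taken from the draft): with
 * prior_cwnd = 10 and ssthresh = 5, each ACK delivering one segment gives
 * sndcnt = DIV_ROUND_UP(5 * prr_delivered, 10) - prr_out, i.e. roughly one
 * new segment sent for every two delivered, so the amount in flight drains
 * from 10 towards ssthresh = 5 over about one RTT.  A minimal userspace
 * sketch of the same arithmetic:
 *
 *	static int prr_sndcnt(int ssthresh, int delivered, int prior_cwnd,
 *			      int out)
 *	{
 *		return (ssthresh * delivered + prior_cwnd - 1) / prior_cwnd
 *			- out;
 *	}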
2480 */ 2481 static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh) 2482 { 2483 struct tcp_sock *tp = tcp_sk(sk); 2484 2485 tp->high_seq = tp->snd_nxt; 2486 tp->tlp_high_seq = 0; 2487 tp->snd_cwnd_cnt = 0; 2488 tp->prior_cwnd = tp->snd_cwnd; 2489 tp->prr_delivered = 0; 2490 tp->prr_out = 0; 2491 if (set_ssthresh) 2492 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 2493 TCP_ECN_queue_cwr(tp); 2494 } 2495 2496 static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, 2497 int fast_rexmit) 2498 { 2499 struct tcp_sock *tp = tcp_sk(sk); 2500 int sndcnt = 0; 2501 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); 2502 2503 tp->prr_delivered += newly_acked_sacked; 2504 if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) { 2505 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + 2506 tp->prior_cwnd - 1; 2507 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; 2508 } else { 2509 sndcnt = min_t(int, delta, 2510 max_t(int, tp->prr_delivered - tp->prr_out, 2511 newly_acked_sacked) + 1); 2512 } 2513 2514 sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0)); 2515 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 2516 } 2517 2518 static inline void tcp_end_cwnd_reduction(struct sock *sk) 2519 { 2520 struct tcp_sock *tp = tcp_sk(sk); 2521 2522 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2523 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || 2524 (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { 2525 tp->snd_cwnd = tp->snd_ssthresh; 2526 tp->snd_cwnd_stamp = tcp_time_stamp; 2527 } 2528 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2529 } 2530 2531 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ 2532 void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) 2533 { 2534 struct tcp_sock *tp = tcp_sk(sk); 2535 2536 tp->prior_ssthresh = 0; 2537 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2538 tp->undo_marker = 0; 2539 tcp_init_cwnd_reduction(sk, set_ssthresh); 2540 tcp_set_ca_state(sk, TCP_CA_CWR); 2541 } 2542 } 2543 2544 static void tcp_try_keep_open(struct sock *sk) 2545 { 2546 struct tcp_sock *tp = tcp_sk(sk); 2547 int state = TCP_CA_Open; 2548 2549 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) 2550 state = TCP_CA_Disorder; 2551 2552 if (inet_csk(sk)->icsk_ca_state != state) { 2553 tcp_set_ca_state(sk, state); 2554 tp->high_seq = tp->snd_nxt; 2555 } 2556 } 2557 2558 static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked) 2559 { 2560 struct tcp_sock *tp = tcp_sk(sk); 2561 2562 tcp_verify_left_out(tp); 2563 2564 if (!tcp_any_retrans_done(sk)) 2565 tp->retrans_stamp = 0; 2566 2567 if (flag & FLAG_ECE) 2568 tcp_enter_cwr(sk, 1); 2569 2570 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2571 tcp_try_keep_open(sk); 2572 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 2573 tcp_moderate_cwnd(tp); 2574 } else { 2575 tcp_cwnd_reduction(sk, newly_acked_sacked, 0); 2576 } 2577 } 2578 2579 static void tcp_mtup_probe_failed(struct sock *sk) 2580 { 2581 struct inet_connection_sock *icsk = inet_csk(sk); 2582 2583 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2584 icsk->icsk_mtup.probe_size = 0; 2585 } 2586 2587 static void tcp_mtup_probe_success(struct sock *sk) 2588 { 2589 struct tcp_sock *tp = tcp_sk(sk); 2590 struct inet_connection_sock *icsk = inet_csk(sk); 2591 2592 /* FIXME: breaks with very large cwnd */ 2593 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2594 tp->snd_cwnd = tp->snd_cwnd * 2595 tcp_mss_to_mtu(sk, tp->mss_cache) / 2596 
icsk->icsk_mtup.probe_size; 2597 tp->snd_cwnd_cnt = 0; 2598 tp->snd_cwnd_stamp = tcp_time_stamp; 2599 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2600 2601 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2602 icsk->icsk_mtup.probe_size = 0; 2603 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2604 } 2605 2606 /* Do a simple retransmit without using the backoff mechanisms in 2607 * tcp_timer. This is used for path mtu discovery. 2608 * The socket is already locked here. 2609 */ 2610 void tcp_simple_retransmit(struct sock *sk) 2611 { 2612 const struct inet_connection_sock *icsk = inet_csk(sk); 2613 struct tcp_sock *tp = tcp_sk(sk); 2614 struct sk_buff *skb; 2615 unsigned int mss = tcp_current_mss(sk); 2616 u32 prior_lost = tp->lost_out; 2617 2618 tcp_for_write_queue(skb, sk) { 2619 if (skb == tcp_send_head(sk)) 2620 break; 2621 if (tcp_skb_seglen(skb) > mss && 2622 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2623 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2624 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 2625 tp->retrans_out -= tcp_skb_pcount(skb); 2626 } 2627 tcp_skb_mark_lost_uncond_verify(tp, skb); 2628 } 2629 } 2630 2631 tcp_clear_retrans_hints_partial(tp); 2632 2633 if (prior_lost == tp->lost_out) 2634 return; 2635 2636 if (tcp_is_reno(tp)) 2637 tcp_limit_reno_sacked(tp); 2638 2639 tcp_verify_left_out(tp); 2640 2641 /* Don't muck with the congestion window here. 2642 * Reason is that we do not increase amount of _data_ 2643 * in network, but units changed and effective 2644 * cwnd/ssthresh really reduced now. 2645 */ 2646 if (icsk->icsk_ca_state != TCP_CA_Loss) { 2647 tp->high_seq = tp->snd_nxt; 2648 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2649 tp->prior_ssthresh = 0; 2650 tp->undo_marker = 0; 2651 tcp_set_ca_state(sk, TCP_CA_Loss); 2652 } 2653 tcp_xmit_retransmit_queue(sk); 2654 } 2655 EXPORT_SYMBOL(tcp_simple_retransmit); 2656 2657 static void tcp_enter_recovery(struct sock *sk, bool ece_ack) 2658 { 2659 struct tcp_sock *tp = tcp_sk(sk); 2660 int mib_idx; 2661 2662 if (tcp_is_reno(tp)) 2663 mib_idx = LINUX_MIB_TCPRENORECOVERY; 2664 else 2665 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 2666 2667 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2668 2669 tp->prior_ssthresh = 0; 2670 tp->undo_marker = tp->snd_una; 2671 tp->undo_retrans = tp->retrans_out; 2672 2673 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2674 if (!ece_ack) 2675 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2676 tcp_init_cwnd_reduction(sk, true); 2677 } 2678 tcp_set_ca_state(sk, TCP_CA_Recovery); 2679 } 2680 2681 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are 2682 * recovered or spurious. Otherwise retransmits more on partial ACKs. 2683 */ 2684 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) 2685 { 2686 struct inet_connection_sock *icsk = inet_csk(sk); 2687 struct tcp_sock *tp = tcp_sk(sk); 2688 bool recovered = !before(tp->snd_una, tp->high_seq); 2689 2690 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2691 if (flag & FLAG_ORIG_SACK_ACKED) { 2692 /* Step 3.b. A timeout is spurious if not all data are 2693 * lost, i.e., never-retransmitted data are (s)acked. 
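 * (Illustrative: after an RTO only the head segment has been retransmitted;
 * if the very next ACK (s)acks data that was never retransmitted, the
 * original flight is evidently still being delivered, the timeout was
 * spurious, and the cwnd/ssthresh reduction is undone below.)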
2694 */ 2695 tcp_try_undo_loss(sk, true); 2696 return; 2697 } 2698 if (after(tp->snd_nxt, tp->high_seq) && 2699 (flag & FLAG_DATA_SACKED || is_dupack)) { 2700 tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ 2701 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { 2702 tp->high_seq = tp->snd_nxt; 2703 __tcp_push_pending_frames(sk, tcp_current_mss(sk), 2704 TCP_NAGLE_OFF); 2705 if (after(tp->snd_nxt, tp->high_seq)) 2706 return; /* Step 2.b */ 2707 tp->frto = 0; 2708 } 2709 } 2710 2711 if (recovered) { 2712 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ 2713 icsk->icsk_retransmits = 0; 2714 tcp_try_undo_recovery(sk); 2715 return; 2716 } 2717 if (flag & FLAG_DATA_ACKED) 2718 icsk->icsk_retransmits = 0; 2719 if (tcp_is_reno(tp)) { 2720 /* A Reno DUPACK means new data in F-RTO step 2.b above are 2721 * delivered. Lower inflight to clock out (re)tranmissions. 2722 */ 2723 if (after(tp->snd_nxt, tp->high_seq) && is_dupack) 2724 tcp_add_reno_sack(sk); 2725 else if (flag & FLAG_SND_UNA_ADVANCED) 2726 tcp_reset_reno_sack(tp); 2727 } 2728 if (tcp_try_undo_loss(sk, false)) 2729 return; 2730 tcp_xmit_retransmit_queue(sk); 2731 } 2732 2733 /* Process an event, which can update packets-in-flight not trivially. 2734 * Main goal of this function is to calculate new estimate for left_out, 2735 * taking into account both packets sitting in receiver's buffer and 2736 * packets lost by network. 2737 * 2738 * Besides that it does CWND reduction, when packet loss is detected 2739 * and changes state of machine. 2740 * 2741 * It does _not_ decide what to send, it is made in function 2742 * tcp_xmit_retransmit_queue(). 2743 */ 2744 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 2745 int prior_sacked, bool is_dupack, 2746 int flag) 2747 { 2748 struct inet_connection_sock *icsk = inet_csk(sk); 2749 struct tcp_sock *tp = tcp_sk(sk); 2750 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 2751 (tcp_fackets_out(tp) > tp->reordering)); 2752 int newly_acked_sacked = 0; 2753 int fast_rexmit = 0; 2754 2755 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 2756 tp->sacked_out = 0; 2757 if (WARN_ON(!tp->sacked_out && tp->fackets_out)) 2758 tp->fackets_out = 0; 2759 2760 /* Now state machine starts. 2761 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 2762 if (flag & FLAG_ECE) 2763 tp->prior_ssthresh = 0; 2764 2765 /* B. In all the states check for reneging SACKs. */ 2766 if (tcp_check_sack_reneging(sk, flag)) 2767 return; 2768 2769 /* C. Check consistency of the current state. */ 2770 tcp_verify_left_out(tp); 2771 2772 /* D. Check state exit conditions. State can be terminated 2773 * when high_seq is ACKed. */ 2774 if (icsk->icsk_ca_state == TCP_CA_Open) { 2775 WARN_ON(tp->retrans_out != 0); 2776 tp->retrans_stamp = 0; 2777 } else if (!before(tp->snd_una, tp->high_seq)) { 2778 switch (icsk->icsk_ca_state) { 2779 case TCP_CA_CWR: 2780 /* CWR is to be held something *above* high_seq 2781 * is ACKed for CWR bit to reach receiver. */ 2782 if (tp->snd_una != tp->high_seq) { 2783 tcp_end_cwnd_reduction(sk); 2784 tcp_set_ca_state(sk, TCP_CA_Open); 2785 } 2786 break; 2787 2788 case TCP_CA_Recovery: 2789 if (tcp_is_reno(tp)) 2790 tcp_reset_reno_sack(tp); 2791 if (tcp_try_undo_recovery(sk)) 2792 return; 2793 tcp_end_cwnd_reduction(sk); 2794 break; 2795 } 2796 } 2797 2798 /* E. Process state. 
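 * (Summarising the switch below: in Recovery, dupacks feed the Reno SACK
 * emulation or a partial ACK may trigger undo; in Loss, tcp_process_loss()
 * handles F-RTO and further retransmission; otherwise the ACK is examined
 * with tcp_time_to_recover() to decide whether to stay in Open/Disorder or
 * to enter Recovery and start fast retransmit.)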
*/ 2799 switch (icsk->icsk_ca_state) { 2800 case TCP_CA_Recovery: 2801 if (!(flag & FLAG_SND_UNA_ADVANCED)) { 2802 if (tcp_is_reno(tp) && is_dupack) 2803 tcp_add_reno_sack(sk); 2804 } else 2805 do_lost = tcp_try_undo_partial(sk, pkts_acked); 2806 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; 2807 break; 2808 case TCP_CA_Loss: 2809 tcp_process_loss(sk, flag, is_dupack); 2810 if (icsk->icsk_ca_state != TCP_CA_Open) 2811 return; 2812 /* Fall through to processing in Open state. */ 2813 default: 2814 if (tcp_is_reno(tp)) { 2815 if (flag & FLAG_SND_UNA_ADVANCED) 2816 tcp_reset_reno_sack(tp); 2817 if (is_dupack) 2818 tcp_add_reno_sack(sk); 2819 } 2820 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; 2821 2822 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 2823 tcp_try_undo_dsack(sk); 2824 2825 if (!tcp_time_to_recover(sk, flag)) { 2826 tcp_try_to_open(sk, flag, newly_acked_sacked); 2827 return; 2828 } 2829 2830 /* MTU probe failure: don't reduce cwnd */ 2831 if (icsk->icsk_ca_state < TCP_CA_CWR && 2832 icsk->icsk_mtup.probe_size && 2833 tp->snd_una == tp->mtu_probe.probe_seq_start) { 2834 tcp_mtup_probe_failed(sk); 2835 /* Restores the reduction we did in tcp_mtup_probe() */ 2836 tp->snd_cwnd++; 2837 tcp_simple_retransmit(sk); 2838 return; 2839 } 2840 2841 /* Otherwise enter Recovery state */ 2842 tcp_enter_recovery(sk, (flag & FLAG_ECE)); 2843 fast_rexmit = 1; 2844 } 2845 2846 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 2847 tcp_update_scoreboard(sk, fast_rexmit); 2848 tcp_cwnd_reduction(sk, newly_acked_sacked, fast_rexmit); 2849 tcp_xmit_retransmit_queue(sk); 2850 } 2851 2852 void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) 2853 { 2854 tcp_rtt_estimator(sk, seq_rtt); 2855 tcp_set_rto(sk); 2856 inet_csk(sk)->icsk_backoff = 0; 2857 } 2858 EXPORT_SYMBOL(tcp_valid_rtt_meas); 2859 2860 /* Read draft-ietf-tcplw-high-performance before mucking 2861 * with this code. (Supersedes RFC1323) 2862 */ 2863 static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 2864 { 2865 /* RTTM Rule: A TSecr value received in a segment is used to 2866 * update the averaged RTT measurement only if the segment 2867 * acknowledges some new data, i.e., only if it advances the 2868 * left edge of the send window. 2869 * 2870 * See draft-ietf-tcplw-high-performance-00, section 3.3. 2871 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 2872 * 2873 * Changed: reset backoff as soon as we see the first valid sample. 2874 * If we do not, we get strongly overestimated rto. With timestamps 2875 * samples are accepted even from very old segments: f.e., when rtt=1 2876 * increases to 8, we retransmit 5 times and after 8 seconds delayed 2877 * answer arrives rto becomes 120 seconds! If at least one of segments 2878 * in window is lost... Voila. --ANK (010210) 2879 */ 2880 struct tcp_sock *tp = tcp_sk(sk); 2881 2882 tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); 2883 } 2884 2885 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 2886 { 2887 /* We don't have a timestamp. Can only use 2888 * packets that are not retransmitted to determine 2889 * rtt estimates. Also, we must not reset the 2890 * backoff for rto until we get a non-retransmitted 2891 * packet. This allows us to deal with a situation 2892 * where the network delay has increased suddenly. 2893 * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 
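 * For example (illustrative): if a segment was retransmitted and an ACK for
 * it then arrives, we cannot tell whether the first or the second copy was
 * acknowledged, so the sample is skipped (FLAG_RETRANS_DATA_ACKED) and the
 * RTO backoff is left untouched until a clean, non-retransmitted segment is
 * ACKed.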
2894 */ 2895 2896 if (flag & FLAG_RETRANS_DATA_ACKED) 2897 return; 2898 2899 tcp_valid_rtt_meas(sk, seq_rtt); 2900 } 2901 2902 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 2903 const s32 seq_rtt) 2904 { 2905 const struct tcp_sock *tp = tcp_sk(sk); 2906 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 2907 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 2908 tcp_ack_saw_tstamp(sk, flag); 2909 else if (seq_rtt >= 0) 2910 tcp_ack_no_tstamp(sk, seq_rtt, flag); 2911 } 2912 2913 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 2914 { 2915 const struct inet_connection_sock *icsk = inet_csk(sk); 2916 icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); 2917 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 2918 } 2919 2920 /* Restart timer after forward progress on connection. 2921 * RFC2988 recommends to restart timer to now+rto. 2922 */ 2923 void tcp_rearm_rto(struct sock *sk) 2924 { 2925 const struct inet_connection_sock *icsk = inet_csk(sk); 2926 struct tcp_sock *tp = tcp_sk(sk); 2927 2928 /* If the retrans timer is currently being used by Fast Open 2929 * for SYN-ACK retrans purpose, stay put. 2930 */ 2931 if (tp->fastopen_rsk) 2932 return; 2933 2934 if (!tp->packets_out) { 2935 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 2936 } else { 2937 u32 rto = inet_csk(sk)->icsk_rto; 2938 /* Offset the time elapsed after installing regular RTO */ 2939 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 2940 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 2941 struct sk_buff *skb = tcp_write_queue_head(sk); 2942 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; 2943 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); 2944 /* delta may not be positive if the socket is locked 2945 * when the retrans timer fires and is rescheduled. 2946 */ 2947 if (delta > 0) 2948 rto = delta; 2949 } 2950 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 2951 TCP_RTO_MAX); 2952 } 2953 } 2954 2955 /* This function is called when the delayed ER timer fires. TCP enters 2956 * fast recovery and performs fast-retransmit. 2957 */ 2958 void tcp_resume_early_retransmit(struct sock *sk) 2959 { 2960 struct tcp_sock *tp = tcp_sk(sk); 2961 2962 tcp_rearm_rto(sk); 2963 2964 /* Stop if ER is disabled after the delayed ER timer is scheduled */ 2965 if (!tp->do_early_retrans) 2966 return; 2967 2968 tcp_enter_recovery(sk, false); 2969 tcp_update_scoreboard(sk, 1); 2970 tcp_xmit_retransmit_queue(sk); 2971 } 2972 2973 /* If we get here, the whole TSO packet has not been acked. */ 2974 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 2975 { 2976 struct tcp_sock *tp = tcp_sk(sk); 2977 u32 packets_acked; 2978 2979 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 2980 2981 packets_acked = tcp_skb_pcount(skb); 2982 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2983 return 0; 2984 packets_acked -= tcp_skb_pcount(skb); 2985 2986 if (packets_acked) { 2987 BUG_ON(tcp_skb_pcount(skb) == 0); 2988 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 2989 } 2990 2991 return packets_acked; 2992 } 2993 2994 /* Remove acknowledged frames from the retransmission queue. If our packet 2995 * is before the ack sequence we can discard it as it's confirmed to have 2996 * arrived at the other end. 
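 * Besides unlinking fully ACKed skbs, the walk below also (illustrative
 * summary) takes the RTT sample from a non-retransmitted segment, counts
 * pkts_acked for the congestion control hooks, and flags reordering when a
 * segment is cumulatively ACKed even though later data had already been
 * SACKed.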
2997 */ 2998 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, 2999 u32 prior_snd_una) 3000 { 3001 struct tcp_sock *tp = tcp_sk(sk); 3002 const struct inet_connection_sock *icsk = inet_csk(sk); 3003 struct sk_buff *skb; 3004 u32 now = tcp_time_stamp; 3005 int fully_acked = true; 3006 int flag = 0; 3007 u32 pkts_acked = 0; 3008 u32 reord = tp->packets_out; 3009 u32 prior_sacked = tp->sacked_out; 3010 s32 seq_rtt = -1; 3011 s32 ca_seq_rtt = -1; 3012 ktime_t last_ackt = net_invalid_timestamp(); 3013 3014 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 3015 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3016 u32 acked_pcount; 3017 u8 sacked = scb->sacked; 3018 3019 /* Determine how many packets and what bytes were acked, tso and else */ 3020 if (after(scb->end_seq, tp->snd_una)) { 3021 if (tcp_skb_pcount(skb) == 1 || 3022 !after(tp->snd_una, scb->seq)) 3023 break; 3024 3025 acked_pcount = tcp_tso_acked(sk, skb); 3026 if (!acked_pcount) 3027 break; 3028 3029 fully_acked = false; 3030 } else { 3031 acked_pcount = tcp_skb_pcount(skb); 3032 } 3033 3034 if (sacked & TCPCB_RETRANS) { 3035 if (sacked & TCPCB_SACKED_RETRANS) 3036 tp->retrans_out -= acked_pcount; 3037 flag |= FLAG_RETRANS_DATA_ACKED; 3038 ca_seq_rtt = -1; 3039 seq_rtt = -1; 3040 } else { 3041 ca_seq_rtt = now - scb->when; 3042 last_ackt = skb->tstamp; 3043 if (seq_rtt < 0) { 3044 seq_rtt = ca_seq_rtt; 3045 } 3046 if (!(sacked & TCPCB_SACKED_ACKED)) 3047 reord = min(pkts_acked, reord); 3048 if (!after(scb->end_seq, tp->high_seq)) 3049 flag |= FLAG_ORIG_SACK_ACKED; 3050 } 3051 3052 if (sacked & TCPCB_SACKED_ACKED) 3053 tp->sacked_out -= acked_pcount; 3054 if (sacked & TCPCB_LOST) 3055 tp->lost_out -= acked_pcount; 3056 3057 tp->packets_out -= acked_pcount; 3058 pkts_acked += acked_pcount; 3059 3060 /* Initial outgoing SYN's get put onto the write_queue 3061 * just like anything else we transmit. It is not 3062 * true data, and if we misinform our callers that 3063 * this ACK acks real data, we will erroneously exit 3064 * connection startup slow start one packet too 3065 * quickly. This is severely frowned upon behavior. 3066 */ 3067 if (!(scb->tcp_flags & TCPHDR_SYN)) { 3068 flag |= FLAG_DATA_ACKED; 3069 } else { 3070 flag |= FLAG_SYN_ACKED; 3071 tp->retrans_stamp = 0; 3072 } 3073 3074 if (!fully_acked) 3075 break; 3076 3077 tcp_unlink_write_queue(skb, sk); 3078 sk_wmem_free_skb(sk, skb); 3079 tp->scoreboard_skb_hint = NULL; 3080 if (skb == tp->retransmit_skb_hint) 3081 tp->retransmit_skb_hint = NULL; 3082 if (skb == tp->lost_skb_hint) 3083 tp->lost_skb_hint = NULL; 3084 } 3085 3086 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3087 tp->snd_up = tp->snd_una; 3088 3089 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3090 flag |= FLAG_SACK_RENEGING; 3091 3092 if (flag & FLAG_ACKED) { 3093 const struct tcp_congestion_ops *ca_ops 3094 = inet_csk(sk)->icsk_ca_ops; 3095 3096 if (unlikely(icsk->icsk_mtup.probe_size && 3097 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3098 tcp_mtup_probe_success(sk); 3099 } 3100 3101 tcp_ack_update_rtt(sk, flag, seq_rtt); 3102 tcp_rearm_rto(sk); 3103 3104 if (tcp_is_reno(tp)) { 3105 tcp_remove_reno_sacks(sk, pkts_acked); 3106 } else { 3107 int delta; 3108 3109 /* Non-retransmitted hole got filled? That's reordering */ 3110 if (reord < prior_fackets) 3111 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3112 3113 delta = tcp_is_fack(tp) ? 
pkts_acked : 3114 prior_sacked - tp->sacked_out; 3115 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3116 } 3117 3118 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 3119 3120 if (ca_ops->pkts_acked) { 3121 s32 rtt_us = -1; 3122 3123 /* Is the ACK triggering packet unambiguous? */ 3124 if (!(flag & FLAG_RETRANS_DATA_ACKED)) { 3125 /* High resolution needed and available? */ 3126 if (ca_ops->flags & TCP_CONG_RTT_STAMP && 3127 !ktime_equal(last_ackt, 3128 net_invalid_timestamp())) 3129 rtt_us = ktime_us_delta(ktime_get_real(), 3130 last_ackt); 3131 else if (ca_seq_rtt >= 0) 3132 rtt_us = jiffies_to_usecs(ca_seq_rtt); 3133 } 3134 3135 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 3136 } 3137 } 3138 3139 #if FASTRETRANS_DEBUG > 0 3140 WARN_ON((int)tp->sacked_out < 0); 3141 WARN_ON((int)tp->lost_out < 0); 3142 WARN_ON((int)tp->retrans_out < 0); 3143 if (!tp->packets_out && tcp_is_sack(tp)) { 3144 icsk = inet_csk(sk); 3145 if (tp->lost_out) { 3146 pr_debug("Leak l=%u %d\n", 3147 tp->lost_out, icsk->icsk_ca_state); 3148 tp->lost_out = 0; 3149 } 3150 if (tp->sacked_out) { 3151 pr_debug("Leak s=%u %d\n", 3152 tp->sacked_out, icsk->icsk_ca_state); 3153 tp->sacked_out = 0; 3154 } 3155 if (tp->retrans_out) { 3156 pr_debug("Leak r=%u %d\n", 3157 tp->retrans_out, icsk->icsk_ca_state); 3158 tp->retrans_out = 0; 3159 } 3160 } 3161 #endif 3162 return flag; 3163 } 3164 3165 static void tcp_ack_probe(struct sock *sk) 3166 { 3167 const struct tcp_sock *tp = tcp_sk(sk); 3168 struct inet_connection_sock *icsk = inet_csk(sk); 3169 3170 /* Was it a usable window open? */ 3171 3172 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { 3173 icsk->icsk_backoff = 0; 3174 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 3175 /* Socket must be waked up by subsequent tcp_data_snd_check(). 3176 * This function is not for random using! 3177 */ 3178 } else { 3179 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3180 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3181 TCP_RTO_MAX); 3182 } 3183 } 3184 3185 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) 3186 { 3187 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3188 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; 3189 } 3190 3191 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3192 { 3193 const struct tcp_sock *tp = tcp_sk(sk); 3194 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3195 !tcp_in_cwnd_reduction(sk); 3196 } 3197 3198 /* Check that window update is acceptable. 3199 * The function assumes that snd_una<=ack<=snd_next. 3200 */ 3201 static inline bool tcp_may_update_window(const struct tcp_sock *tp, 3202 const u32 ack, const u32 ack_seq, 3203 const u32 nwin) 3204 { 3205 return after(ack, tp->snd_una) || 3206 after(ack_seq, tp->snd_wl1) || 3207 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); 3208 } 3209 3210 /* Update our send window. 3211 * 3212 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3213 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 
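 * Instead, tcp_may_update_window() above accepts the advertisement when the
 * ACK advances snd_una, or carries a newer sequence than snd_wl1, or repeats
 * snd_wl1 while advertising a larger window.  For example (illustrative): a
 * pure window update with ack == snd_una, ack_seq == snd_wl1 and nwin grown
 * from 8192 to 16384 is applied, whereas a reordered older segment that
 * would shrink the window is ignored.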
3214 */ 3215 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3216 u32 ack_seq) 3217 { 3218 struct tcp_sock *tp = tcp_sk(sk); 3219 int flag = 0; 3220 u32 nwin = ntohs(tcp_hdr(skb)->window); 3221 3222 if (likely(!tcp_hdr(skb)->syn)) 3223 nwin <<= tp->rx_opt.snd_wscale; 3224 3225 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3226 flag |= FLAG_WIN_UPDATE; 3227 tcp_update_wl(tp, ack_seq); 3228 3229 if (tp->snd_wnd != nwin) { 3230 tp->snd_wnd = nwin; 3231 3232 /* Note, it is the only place, where 3233 * fast path is recovered for sending TCP. 3234 */ 3235 tp->pred_flags = 0; 3236 tcp_fast_path_check(sk); 3237 3238 if (nwin > tp->max_window) { 3239 tp->max_window = nwin; 3240 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3241 } 3242 } 3243 } 3244 3245 tp->snd_una = ack; 3246 3247 return flag; 3248 } 3249 3250 /* RFC 5961 7 [ACK Throttling] */ 3251 static void tcp_send_challenge_ack(struct sock *sk) 3252 { 3253 /* unprotected vars, we dont care of overwrites */ 3254 static u32 challenge_timestamp; 3255 static unsigned int challenge_count; 3256 u32 now = jiffies / HZ; 3257 3258 if (now != challenge_timestamp) { 3259 challenge_timestamp = now; 3260 challenge_count = 0; 3261 } 3262 if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { 3263 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); 3264 tcp_send_ack(sk); 3265 } 3266 } 3267 3268 /* This routine deals with acks during a TLP episode. 3269 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. 3270 */ 3271 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) 3272 { 3273 struct tcp_sock *tp = tcp_sk(sk); 3274 bool is_tlp_dupack = (ack == tp->tlp_high_seq) && 3275 !(flag & (FLAG_SND_UNA_ADVANCED | 3276 FLAG_NOT_DUP | FLAG_DATA_SACKED)); 3277 3278 /* Mark the end of TLP episode on receiving TLP dupack or when 3279 * ack is after tlp_high_seq. 3280 */ 3281 if (is_tlp_dupack) { 3282 tp->tlp_high_seq = 0; 3283 return; 3284 } 3285 3286 if (after(ack, tp->tlp_high_seq)) { 3287 tp->tlp_high_seq = 0; 3288 /* Don't reduce cwnd if DSACK arrives for TLP retrans. */ 3289 if (!(flag & FLAG_DSACKING_ACK)) { 3290 tcp_init_cwnd_reduction(sk, true); 3291 tcp_set_ca_state(sk, TCP_CA_CWR); 3292 tcp_end_cwnd_reduction(sk); 3293 tcp_set_ca_state(sk, TCP_CA_Open); 3294 NET_INC_STATS_BH(sock_net(sk), 3295 LINUX_MIB_TCPLOSSPROBERECOVERY); 3296 } 3297 } 3298 } 3299 3300 /* This routine deals with incoming acks, but not outgoing ones. */ 3301 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3302 { 3303 struct inet_connection_sock *icsk = inet_csk(sk); 3304 struct tcp_sock *tp = tcp_sk(sk); 3305 u32 prior_snd_una = tp->snd_una; 3306 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3307 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3308 bool is_dupack = false; 3309 u32 prior_in_flight; 3310 u32 prior_fackets; 3311 int prior_packets; 3312 int prior_sacked = tp->sacked_out; 3313 int pkts_acked = 0; 3314 3315 /* If the ack is older than previous acks 3316 * then we can probably ignore it. 3317 */ 3318 if (before(ack, prior_snd_una)) { 3319 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3320 if (before(ack, prior_snd_una - tp->max_window)) { 3321 tcp_send_challenge_ack(sk); 3322 return -1; 3323 } 3324 goto old_ack; 3325 } 3326 3327 /* If the ack includes data we haven't sent yet, discard 3328 * this segment (RFC793 Section 3.9). 
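 * For example (illustrative): with snd_nxt == 1000, an ACK claiming 2000
 * acknowledges data that was never sent, so it is logged and dropped rather
 * than processed.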
3329 */ 3330 if (after(ack, tp->snd_nxt)) 3331 goto invalid_ack; 3332 3333 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || 3334 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) 3335 tcp_rearm_rto(sk); 3336 3337 if (after(ack, prior_snd_una)) 3338 flag |= FLAG_SND_UNA_ADVANCED; 3339 3340 prior_fackets = tp->fackets_out; 3341 prior_in_flight = tcp_packets_in_flight(tp); 3342 3343 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3344 /* Window is constant, pure forward advance. 3345 * No more checks are required. 3346 * Note, we use the fact that SND.UNA>=SND.WL2. 3347 */ 3348 tcp_update_wl(tp, ack_seq); 3349 tp->snd_una = ack; 3350 flag |= FLAG_WIN_UPDATE; 3351 3352 tcp_ca_event(sk, CA_EVENT_FAST_ACK); 3353 3354 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); 3355 } else { 3356 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3357 flag |= FLAG_DATA; 3358 else 3359 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3360 3361 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3362 3363 if (TCP_SKB_CB(skb)->sacked) 3364 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3365 3366 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) 3367 flag |= FLAG_ECE; 3368 3369 tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 3370 } 3371 3372 /* We passed data and got it acked, remove any soft error 3373 * log. Something worked... 3374 */ 3375 sk->sk_err_soft = 0; 3376 icsk->icsk_probes_out = 0; 3377 tp->rcv_tstamp = tcp_time_stamp; 3378 prior_packets = tp->packets_out; 3379 if (!prior_packets) 3380 goto no_queue; 3381 3382 /* See if we can take anything off of the retransmit queue. */ 3383 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3384 3385 pkts_acked = prior_packets - tp->packets_out; 3386 3387 if (tcp_ack_is_dubious(sk, flag)) { 3388 /* Advance CWND, if state allows this. */ 3389 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag)) 3390 tcp_cong_avoid(sk, ack, prior_in_flight); 3391 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3392 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3393 is_dupack, flag); 3394 } else { 3395 if (flag & FLAG_DATA_ACKED) 3396 tcp_cong_avoid(sk, ack, prior_in_flight); 3397 } 3398 3399 if (tp->tlp_high_seq) 3400 tcp_process_tlp_ack(sk, ack, flag); 3401 3402 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { 3403 struct dst_entry *dst = __sk_dst_get(sk); 3404 if (dst) 3405 dst_confirm(dst); 3406 } 3407 3408 if (icsk->icsk_pending == ICSK_TIME_RETRANS) 3409 tcp_schedule_loss_probe(sk); 3410 return 1; 3411 3412 no_queue: 3413 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3414 if (flag & FLAG_DSACKING_ACK) 3415 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3416 is_dupack, flag); 3417 /* If this ack opens up a zero window, clear backoff. It was 3418 * being used to time the probes, and is probably far higher than 3419 * it needs to be for normal retransmission. 3420 */ 3421 if (tcp_send_head(sk)) 3422 tcp_ack_probe(sk); 3423 3424 if (tp->tlp_high_seq) 3425 tcp_process_tlp_ack(sk, ack, flag); 3426 return 1; 3427 3428 invalid_ack: 3429 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3430 return -1; 3431 3432 old_ack: 3433 /* If data was SACKed, tag it and see if we should send more data. 3434 * If data was DSACKed, see if we can undo a cwnd reduction. 
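 * (Illustrative: a reordered ACK below snd_una can still carry a fresh SACK
 * block for data above snd_una, or a D-SACK showing that a retransmission
 * was unnecessary, so the sacktag pass below is still worthwhile.)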
3435 */ 3436 if (TCP_SKB_CB(skb)->sacked) { 3437 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3438 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3439 is_dupack, flag); 3440 } 3441 3442 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3443 return 0; 3444 } 3445 3446 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3447 * But, this can also be called on packets in the established flow when 3448 * the fast version below fails. 3449 */ 3450 void tcp_parse_options(const struct sk_buff *skb, 3451 struct tcp_options_received *opt_rx, int estab, 3452 struct tcp_fastopen_cookie *foc) 3453 { 3454 const unsigned char *ptr; 3455 const struct tcphdr *th = tcp_hdr(skb); 3456 int length = (th->doff * 4) - sizeof(struct tcphdr); 3457 3458 ptr = (const unsigned char *)(th + 1); 3459 opt_rx->saw_tstamp = 0; 3460 3461 while (length > 0) { 3462 int opcode = *ptr++; 3463 int opsize; 3464 3465 switch (opcode) { 3466 case TCPOPT_EOL: 3467 return; 3468 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3469 length--; 3470 continue; 3471 default: 3472 opsize = *ptr++; 3473 if (opsize < 2) /* "silly options" */ 3474 return; 3475 if (opsize > length) 3476 return; /* don't parse partial options */ 3477 switch (opcode) { 3478 case TCPOPT_MSS: 3479 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3480 u16 in_mss = get_unaligned_be16(ptr); 3481 if (in_mss) { 3482 if (opt_rx->user_mss && 3483 opt_rx->user_mss < in_mss) 3484 in_mss = opt_rx->user_mss; 3485 opt_rx->mss_clamp = in_mss; 3486 } 3487 } 3488 break; 3489 case TCPOPT_WINDOW: 3490 if (opsize == TCPOLEN_WINDOW && th->syn && 3491 !estab && sysctl_tcp_window_scaling) { 3492 __u8 snd_wscale = *(__u8 *)ptr; 3493 opt_rx->wscale_ok = 1; 3494 if (snd_wscale > 14) { 3495 net_info_ratelimited("%s: Illegal window scaling value %d >14 received\n", 3496 __func__, 3497 snd_wscale); 3498 snd_wscale = 14; 3499 } 3500 opt_rx->snd_wscale = snd_wscale; 3501 } 3502 break; 3503 case TCPOPT_TIMESTAMP: 3504 if ((opsize == TCPOLEN_TIMESTAMP) && 3505 ((estab && opt_rx->tstamp_ok) || 3506 (!estab && sysctl_tcp_timestamps))) { 3507 opt_rx->saw_tstamp = 1; 3508 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3509 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3510 } 3511 break; 3512 case TCPOPT_SACK_PERM: 3513 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3514 !estab && sysctl_tcp_sack) { 3515 opt_rx->sack_ok = TCP_SACK_SEEN; 3516 tcp_sack_reset(opt_rx); 3517 } 3518 break; 3519 3520 case TCPOPT_SACK: 3521 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3522 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3523 opt_rx->sack_ok) { 3524 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3525 } 3526 break; 3527 #ifdef CONFIG_TCP_MD5SIG 3528 case TCPOPT_MD5SIG: 3529 /* 3530 * The MD5 Hash has already been 3531 * checked (see tcp_v{4,6}_do_rcv()). 3532 */ 3533 break; 3534 #endif 3535 case TCPOPT_EXP: 3536 /* Fast Open option shares code 254 using a 3537 * 16 bits magic number. It's valid only in 3538 * SYN or SYN-ACK with an even size. 
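 * Wire layout (illustrative): kind 254, total length, a 16-bit magic
 * (TCPOPT_FASTOPEN_MAGIC), then the cookie itself; e.g. an 8-byte cookie in
 * a SYN-ACK gives opsize = 12, so foc->len = opsize -
 * TCPOLEN_EXP_FASTOPEN_BASE = 8, which sits inside the
 * TCP_FASTOPEN_COOKIE_MIN..MAX bounds checked below.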
3539 */ 3540 if (opsize < TCPOLEN_EXP_FASTOPEN_BASE || 3541 get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC || 3542 foc == NULL || !th->syn || (opsize & 1)) 3543 break; 3544 foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE; 3545 if (foc->len >= TCP_FASTOPEN_COOKIE_MIN && 3546 foc->len <= TCP_FASTOPEN_COOKIE_MAX) 3547 memcpy(foc->val, ptr + 2, foc->len); 3548 else if (foc->len != 0) 3549 foc->len = -1; 3550 break; 3551 3552 } 3553 ptr += opsize-2; 3554 length -= opsize; 3555 } 3556 } 3557 } 3558 EXPORT_SYMBOL(tcp_parse_options); 3559 3560 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 3561 { 3562 const __be32 *ptr = (const __be32 *)(th + 1); 3563 3564 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3565 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 3566 tp->rx_opt.saw_tstamp = 1; 3567 ++ptr; 3568 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3569 ++ptr; 3570 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3571 return true; 3572 } 3573 return false; 3574 } 3575 3576 /* Fast parse options. This hopes to only see timestamps. 3577 * If it is wrong it falls back on tcp_parse_options(). 3578 */ 3579 static bool tcp_fast_parse_options(const struct sk_buff *skb, 3580 const struct tcphdr *th, struct tcp_sock *tp) 3581 { 3582 /* In the spirit of fast parsing, compare doff directly to constant 3583 * values. Because equality is used, short doff can be ignored here. 3584 */ 3585 if (th->doff == (sizeof(*th) / 4)) { 3586 tp->rx_opt.saw_tstamp = 0; 3587 return false; 3588 } else if (tp->rx_opt.tstamp_ok && 3589 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 3590 if (tcp_parse_aligned_timestamp(tp, th)) 3591 return true; 3592 } 3593 3594 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3595 if (tp->rx_opt.saw_tstamp) 3596 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3597 3598 return true; 3599 } 3600 3601 #ifdef CONFIG_TCP_MD5SIG 3602 /* 3603 * Parse MD5 Signature option 3604 */ 3605 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 3606 { 3607 int length = (th->doff << 2) - sizeof(*th); 3608 const u8 *ptr = (const u8 *)(th + 1); 3609 3610 /* If the TCP option is too short, we can short cut */ 3611 if (length < TCPOLEN_MD5SIG) 3612 return NULL; 3613 3614 while (length > 0) { 3615 int opcode = *ptr++; 3616 int opsize; 3617 3618 switch(opcode) { 3619 case TCPOPT_EOL: 3620 return NULL; 3621 case TCPOPT_NOP: 3622 length--; 3623 continue; 3624 default: 3625 opsize = *ptr++; 3626 if (opsize < 2 || opsize > length) 3627 return NULL; 3628 if (opcode == TCPOPT_MD5SIG) 3629 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 3630 } 3631 ptr += opsize - 2; 3632 length -= opsize; 3633 } 3634 return NULL; 3635 } 3636 EXPORT_SYMBOL(tcp_parse_md5sig_option); 3637 #endif 3638 3639 static inline void tcp_store_ts_recent(struct tcp_sock *tp) 3640 { 3641 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3642 tp->rx_opt.ts_recent_stamp = get_seconds(); 3643 } 3644 3645 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 3646 { 3647 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 3648 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 3649 * extra check below makes sure this can only happen 3650 * for pure ACK frames. -DaveM 3651 * 3652 * Not only, also it occurs for expired timestamps. 3653 */ 3654 3655 if (tcp_paws_check(&tp->rx_opt, 0)) 3656 tcp_store_ts_recent(tp); 3657 } 3658 } 3659 3660 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 3661 * 3662 * It is not fatal. 
If this ACK does _not_ change critical state (seqs, window) 3663 * it can pass through stack. So, the following predicate verifies that 3664 * this segment is not used for anything but congestion avoidance or 3665 * fast retransmit. Moreover, we even are able to eliminate most of such 3666 * second order effects, if we apply some small "replay" window (~RTO) 3667 * to timestamp space. 3668 * 3669 * All these measures still do not guarantee that we reject wrapped ACKs 3670 * on networks with high bandwidth, when sequence space is recycled fastly, 3671 * but it guarantees that such events will be very rare and do not affect 3672 * connection seriously. This doesn't look nice, but alas, PAWS is really 3673 * buggy extension. 3674 * 3675 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 3676 * states that events when retransmit arrives after original data are rare. 3677 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 3678 * the biggest problem on large power networks even with minor reordering. 3679 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 3680 * up to bandwidth of 18Gigabit/sec. 8) ] 3681 */ 3682 3683 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 3684 { 3685 const struct tcp_sock *tp = tcp_sk(sk); 3686 const struct tcphdr *th = tcp_hdr(skb); 3687 u32 seq = TCP_SKB_CB(skb)->seq; 3688 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3689 3690 return (/* 1. Pure ACK with correct sequence number. */ 3691 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 3692 3693 /* 2. ... and duplicate ACK. */ 3694 ack == tp->snd_una && 3695 3696 /* 3. ... and does not update window. */ 3697 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 3698 3699 /* 4. ... and sits in replay window. */ 3700 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 3701 } 3702 3703 static inline bool tcp_paws_discard(const struct sock *sk, 3704 const struct sk_buff *skb) 3705 { 3706 const struct tcp_sock *tp = tcp_sk(sk); 3707 3708 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && 3709 !tcp_disordered_ack(sk, skb); 3710 } 3711 3712 /* Check segment sequence number for validity. 3713 * 3714 * Segment controls are considered valid, if the segment 3715 * fits to the window after truncation to the window. Acceptability 3716 * of data (and SYN, FIN, of course) is checked separately. 3717 * See tcp_data_queue(), for example. 3718 * 3719 * Also, controls (RST is main one) are accepted using RCV.WUP instead 3720 * of RCV.NXT. Peer still did not advance his SND.UNA when we 3721 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 3722 * (borrowed from freebsd) 3723 */ 3724 3725 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 3726 { 3727 return !before(end_seq, tp->rcv_wup) && 3728 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 3729 } 3730 3731 /* When we get a reset we do this. */ 3732 void tcp_reset(struct sock *sk) 3733 { 3734 /* We want the right error as BSD sees it (and indeed as we do). 
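 *
 * The switch below applies the following mapping:
 *
 *   SYN_SENT   -> ECONNREFUSED (our connect() was refused)
 *   CLOSE_WAIT -> EPIPE        (peer reset after its FIN)
 *   CLOSE      -> nothing left to report
 *   others     -> ECONNRESET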
*/ 3735 switch (sk->sk_state) { 3736 case TCP_SYN_SENT: 3737 sk->sk_err = ECONNREFUSED; 3738 break; 3739 case TCP_CLOSE_WAIT: 3740 sk->sk_err = EPIPE; 3741 break; 3742 case TCP_CLOSE: 3743 return; 3744 default: 3745 sk->sk_err = ECONNRESET; 3746 } 3747 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3748 smp_wmb(); 3749 3750 if (!sock_flag(sk, SOCK_DEAD)) 3751 sk->sk_error_report(sk); 3752 3753 tcp_done(sk); 3754 } 3755 3756 /* 3757 * Process the FIN bit. This now behaves as it is supposed to work 3758 * and the FIN takes effect when it is validly part of sequence 3759 * space. Not before when we get holes. 3760 * 3761 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 3762 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 3763 * TIME-WAIT) 3764 * 3765 * If we are in FINWAIT-1, a received FIN indicates simultaneous 3766 * close and we go into CLOSING (and later onto TIME-WAIT) 3767 * 3768 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 3769 */ 3770 static void tcp_fin(struct sock *sk) 3771 { 3772 struct tcp_sock *tp = tcp_sk(sk); 3773 3774 inet_csk_schedule_ack(sk); 3775 3776 sk->sk_shutdown |= RCV_SHUTDOWN; 3777 sock_set_flag(sk, SOCK_DONE); 3778 3779 switch (sk->sk_state) { 3780 case TCP_SYN_RECV: 3781 case TCP_ESTABLISHED: 3782 /* Move to CLOSE_WAIT */ 3783 tcp_set_state(sk, TCP_CLOSE_WAIT); 3784 inet_csk(sk)->icsk_ack.pingpong = 1; 3785 break; 3786 3787 case TCP_CLOSE_WAIT: 3788 case TCP_CLOSING: 3789 /* Received a retransmission of the FIN, do 3790 * nothing. 3791 */ 3792 break; 3793 case TCP_LAST_ACK: 3794 /* RFC793: Remain in the LAST-ACK state. */ 3795 break; 3796 3797 case TCP_FIN_WAIT1: 3798 /* This case occurs when a simultaneous close 3799 * happens, we must ack the received FIN and 3800 * enter the CLOSING state. 3801 */ 3802 tcp_send_ack(sk); 3803 tcp_set_state(sk, TCP_CLOSING); 3804 break; 3805 case TCP_FIN_WAIT2: 3806 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 3807 tcp_send_ack(sk); 3808 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 3809 break; 3810 default: 3811 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 3812 * cases we should never reach this piece of code. 3813 */ 3814 pr_err("%s: Impossible, sk->sk_state=%d\n", 3815 __func__, sk->sk_state); 3816 break; 3817 } 3818 3819 /* It _is_ possible, that we have something out-of-order _after_ FIN. 3820 * Probably, we should reset in this case. For now drop them. 3821 */ 3822 __skb_queue_purge(&tp->out_of_order_queue); 3823 if (tcp_is_sack(tp)) 3824 tcp_sack_reset(&tp->rx_opt); 3825 sk_mem_reclaim(sk); 3826 3827 if (!sock_flag(sk, SOCK_DEAD)) { 3828 sk->sk_state_change(sk); 3829 3830 /* Do not send POLL_HUP for half duplex close. 
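 *
 * POLL_HUP is reserved for a full close: both directions shut
 * down (SHUTDOWN_MASK) or the socket already in TCP_CLOSE. A FIN
 * arriving while our send side is still open only wakes readers
 * with POLL_IN.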
*/ 3831 if (sk->sk_shutdown == SHUTDOWN_MASK || 3832 sk->sk_state == TCP_CLOSE) 3833 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 3834 else 3835 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 3836 } 3837 } 3838 3839 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 3840 u32 end_seq) 3841 { 3842 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 3843 if (before(seq, sp->start_seq)) 3844 sp->start_seq = seq; 3845 if (after(end_seq, sp->end_seq)) 3846 sp->end_seq = end_seq; 3847 return true; 3848 } 3849 return false; 3850 } 3851 3852 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 3853 { 3854 struct tcp_sock *tp = tcp_sk(sk); 3855 3856 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3857 int mib_idx; 3858 3859 if (before(seq, tp->rcv_nxt)) 3860 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 3861 else 3862 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 3863 3864 NET_INC_STATS_BH(sock_net(sk), mib_idx); 3865 3866 tp->rx_opt.dsack = 1; 3867 tp->duplicate_sack[0].start_seq = seq; 3868 tp->duplicate_sack[0].end_seq = end_seq; 3869 } 3870 } 3871 3872 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 3873 { 3874 struct tcp_sock *tp = tcp_sk(sk); 3875 3876 if (!tp->rx_opt.dsack) 3877 tcp_dsack_set(sk, seq, end_seq); 3878 else 3879 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 3880 } 3881 3882 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 3883 { 3884 struct tcp_sock *tp = tcp_sk(sk); 3885 3886 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 3887 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 3888 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 3889 tcp_enter_quickack_mode(sk); 3890 3891 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3892 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3893 3894 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 3895 end_seq = tp->rcv_nxt; 3896 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 3897 } 3898 } 3899 3900 tcp_send_ack(sk); 3901 } 3902 3903 /* These routines update the SACK block as out-of-order packets arrive or 3904 * in-order packets close up the sequence space. 3905 */ 3906 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 3907 { 3908 int this_sack; 3909 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3910 struct tcp_sack_block *swalk = sp + 1; 3911 3912 /* See if the recent change to the first SACK eats into 3913 * or hits the sequence space of other SACK blocks, if so coalesce. 3914 */ 3915 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 3916 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 3917 int i; 3918 3919 /* Zap SWALK, by moving every further SACK up by one slot. 3920 * Decrease num_sacks. 3921 */ 3922 tp->rx_opt.num_sacks--; 3923 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 3924 sp[i] = sp[i + 1]; 3925 continue; 3926 } 3927 this_sack++, swalk++; 3928 } 3929 } 3930 3931 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 3932 { 3933 struct tcp_sock *tp = tcp_sk(sk); 3934 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3935 int cur_sacks = tp->rx_opt.num_sacks; 3936 int this_sack; 3937 3938 if (!cur_sacks) 3939 goto new_sack; 3940 3941 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 3942 if (tcp_sack_extend(sp, seq, end_seq)) { 3943 /* Rotate this_sack to the first one. 
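 *
 * RFC 2018 wants the most recently changed block reported first,
 * so the block we just extended is bubbled up to
 * selective_acks[0] before trying to coalesce.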
*/ 3944 for (; this_sack > 0; this_sack--, sp--) 3945 swap(*sp, *(sp - 1)); 3946 if (cur_sacks > 1) 3947 tcp_sack_maybe_coalesce(tp); 3948 return; 3949 } 3950 } 3951 3952 /* Could not find an adjacent existing SACK, build a new one, 3953 * put it at the front, and shift everyone else down. We 3954 * always know there is at least one SACK present already here. 3955 * 3956 * If the sack array is full, forget about the last one. 3957 */ 3958 if (this_sack >= TCP_NUM_SACKS) { 3959 this_sack--; 3960 tp->rx_opt.num_sacks--; 3961 sp--; 3962 } 3963 for (; this_sack > 0; this_sack--, sp--) 3964 *sp = *(sp - 1); 3965 3966 new_sack: 3967 /* Build the new head SACK, and we're done. */ 3968 sp->start_seq = seq; 3969 sp->end_seq = end_seq; 3970 tp->rx_opt.num_sacks++; 3971 } 3972 3973 /* RCV.NXT advances, some SACKs should be eaten. */ 3974 3975 static void tcp_sack_remove(struct tcp_sock *tp) 3976 { 3977 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3978 int num_sacks = tp->rx_opt.num_sacks; 3979 int this_sack; 3980 3981 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 3982 if (skb_queue_empty(&tp->out_of_order_queue)) { 3983 tp->rx_opt.num_sacks = 0; 3984 return; 3985 } 3986 3987 for (this_sack = 0; this_sack < num_sacks;) { 3988 /* Check if the start of the sack is covered by RCV.NXT. */ 3989 if (!before(tp->rcv_nxt, sp->start_seq)) { 3990 int i; 3991 3992 /* RCV.NXT must cover all the block! */ 3993 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); 3994 3995 /* Zap this SACK, by moving forward any other SACKS. */ 3996 for (i=this_sack+1; i < num_sacks; i++) 3997 tp->selective_acks[i-1] = tp->selective_acks[i]; 3998 num_sacks--; 3999 continue; 4000 } 4001 this_sack++; 4002 sp++; 4003 } 4004 tp->rx_opt.num_sacks = num_sacks; 4005 } 4006 4007 /* This one checks to see if we can put data from the 4008 * out_of_order queue into the receive_queue. 
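 *
 * The loop below keeps peeking at the head of the ofo queue as
 * long as it starts at or before rcv_nxt: segments that are
 * entirely duplicate are dropped (after extending a D-SACK for
 * them), the rest are spliced onto sk_receive_queue and rcv_nxt
 * advances past them.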
4009 */ 4010 static void tcp_ofo_queue(struct sock *sk) 4011 { 4012 struct tcp_sock *tp = tcp_sk(sk); 4013 __u32 dsack_high = tp->rcv_nxt; 4014 struct sk_buff *skb; 4015 4016 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 4017 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4018 break; 4019 4020 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4021 __u32 dsack = dsack_high; 4022 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4023 dsack_high = TCP_SKB_CB(skb)->end_seq; 4024 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4025 } 4026 4027 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4028 SOCK_DEBUG(sk, "ofo packet was already received\n"); 4029 __skb_unlink(skb, &tp->out_of_order_queue); 4030 __kfree_skb(skb); 4031 continue; 4032 } 4033 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 4034 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4035 TCP_SKB_CB(skb)->end_seq); 4036 4037 __skb_unlink(skb, &tp->out_of_order_queue); 4038 __skb_queue_tail(&sk->sk_receive_queue, skb); 4039 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4040 if (tcp_hdr(skb)->fin) 4041 tcp_fin(sk); 4042 } 4043 } 4044 4045 static bool tcp_prune_ofo_queue(struct sock *sk); 4046 static int tcp_prune_queue(struct sock *sk); 4047 4048 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, 4049 unsigned int size) 4050 { 4051 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4052 !sk_rmem_schedule(sk, skb, size)) { 4053 4054 if (tcp_prune_queue(sk) < 0) 4055 return -1; 4056 4057 if (!sk_rmem_schedule(sk, skb, size)) { 4058 if (!tcp_prune_ofo_queue(sk)) 4059 return -1; 4060 4061 if (!sk_rmem_schedule(sk, skb, size)) 4062 return -1; 4063 } 4064 } 4065 return 0; 4066 } 4067 4068 /** 4069 * tcp_try_coalesce - try to merge skb to prior one 4070 * @sk: socket 4071 * @to: prior buffer 4072 * @from: buffer to add in queue 4073 * @fragstolen: pointer to boolean 4074 * 4075 * Before queueing skb @from after @to, try to merge them 4076 * to reduce overall memory use and queue lengths, if cost is small. 4077 * Packets in ofo or receive queues can stay a long time. 4078 * Better try to coalesce them right now to avoid future collapses. 4079 * Returns true if caller should free @from instead of queueing it 4080 */ 4081 static bool tcp_try_coalesce(struct sock *sk, 4082 struct sk_buff *to, 4083 struct sk_buff *from, 4084 bool *fragstolen) 4085 { 4086 int delta; 4087 4088 *fragstolen = false; 4089 4090 if (tcp_hdr(from)->fin) 4091 return false; 4092 4093 /* Its possible this segment overlaps with prior segment in queue */ 4094 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) 4095 return false; 4096 4097 if (!skb_try_coalesce(to, from, fragstolen, &delta)) 4098 return false; 4099 4100 atomic_add(delta, &sk->sk_rmem_alloc); 4101 sk_mem_charge(sk, delta); 4102 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); 4103 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; 4104 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; 4105 return true; 4106 } 4107 4108 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4109 { 4110 struct tcp_sock *tp = tcp_sk(sk); 4111 struct sk_buff *skb1; 4112 u32 seq, end_seq; 4113 4114 TCP_ECN_check_ce(tp, skb); 4115 4116 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4117 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP); 4118 __kfree_skb(skb); 4119 return; 4120 } 4121 4122 /* Disable header prediction. 
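 *
 * Once an out-of-order segment is queued, the next in-sequence
 * segment can no longer be handled on the fast path, so
 * pred_flags is cleared here and is only recomputed (via
 * tcp_fast_path_check()) after the hole has been filled.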
*/ 4123 tp->pred_flags = 0; 4124 inet_csk_schedule_ack(sk); 4125 4126 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); 4127 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4128 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4129 4130 skb1 = skb_peek_tail(&tp->out_of_order_queue); 4131 if (!skb1) { 4132 /* Initial out of order segment, build 1 SACK. */ 4133 if (tcp_is_sack(tp)) { 4134 tp->rx_opt.num_sacks = 1; 4135 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 4136 tp->selective_acks[0].end_seq = 4137 TCP_SKB_CB(skb)->end_seq; 4138 } 4139 __skb_queue_head(&tp->out_of_order_queue, skb); 4140 goto end; 4141 } 4142 4143 seq = TCP_SKB_CB(skb)->seq; 4144 end_seq = TCP_SKB_CB(skb)->end_seq; 4145 4146 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4147 bool fragstolen; 4148 4149 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { 4150 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4151 } else { 4152 kfree_skb_partial(skb, fragstolen); 4153 skb = NULL; 4154 } 4155 4156 if (!tp->rx_opt.num_sacks || 4157 tp->selective_acks[0].end_seq != seq) 4158 goto add_sack; 4159 4160 /* Common case: data arrive in order after hole. */ 4161 tp->selective_acks[0].end_seq = end_seq; 4162 goto end; 4163 } 4164 4165 /* Find place to insert this segment. */ 4166 while (1) { 4167 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4168 break; 4169 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { 4170 skb1 = NULL; 4171 break; 4172 } 4173 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); 4174 } 4175 4176 /* Do skb overlap to previous one? */ 4177 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4178 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4179 /* All the bits are present. Drop. */ 4180 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4181 __kfree_skb(skb); 4182 skb = NULL; 4183 tcp_dsack_set(sk, seq, end_seq); 4184 goto add_sack; 4185 } 4186 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4187 /* Partial overlap. */ 4188 tcp_dsack_set(sk, seq, 4189 TCP_SKB_CB(skb1)->end_seq); 4190 } else { 4191 if (skb_queue_is_first(&tp->out_of_order_queue, 4192 skb1)) 4193 skb1 = NULL; 4194 else 4195 skb1 = skb_queue_prev( 4196 &tp->out_of_order_queue, 4197 skb1); 4198 } 4199 } 4200 if (!skb1) 4201 __skb_queue_head(&tp->out_of_order_queue, skb); 4202 else 4203 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4204 4205 /* And clean segments covered by new one as whole. */ 4206 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { 4207 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); 4208 4209 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4210 break; 4211 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4212 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4213 end_seq); 4214 break; 4215 } 4216 __skb_unlink(skb1, &tp->out_of_order_queue); 4217 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4218 TCP_SKB_CB(skb1)->end_seq); 4219 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4220 __kfree_skb(skb1); 4221 } 4222 4223 add_sack: 4224 if (tcp_is_sack(tp)) 4225 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4226 end: 4227 if (skb) 4228 skb_set_owner_r(skb, sk); 4229 } 4230 4231 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4232 bool *fragstolen) 4233 { 4234 int eaten; 4235 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4236 4237 __skb_pull(skb, hdrlen); 4238 eaten = (tail && 4239 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 
1 : 0; 4240 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4241 if (!eaten) { 4242 __skb_queue_tail(&sk->sk_receive_queue, skb); 4243 skb_set_owner_r(skb, sk); 4244 } 4245 return eaten; 4246 } 4247 4248 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4249 { 4250 struct sk_buff *skb = NULL; 4251 struct tcphdr *th; 4252 bool fragstolen; 4253 4254 if (size == 0) 4255 return 0; 4256 4257 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); 4258 if (!skb) 4259 goto err; 4260 4261 if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th))) 4262 goto err_free; 4263 4264 th = (struct tcphdr *)skb_put(skb, sizeof(*th)); 4265 skb_reset_transport_header(skb); 4266 memset(th, 0, sizeof(*th)); 4267 4268 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) 4269 goto err_free; 4270 4271 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 4272 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 4273 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 4274 4275 if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { 4276 WARN_ON_ONCE(fragstolen); /* should not happen */ 4277 __kfree_skb(skb); 4278 } 4279 return size; 4280 4281 err_free: 4282 kfree_skb(skb); 4283 err: 4284 return -ENOMEM; 4285 } 4286 4287 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4288 { 4289 const struct tcphdr *th = tcp_hdr(skb); 4290 struct tcp_sock *tp = tcp_sk(sk); 4291 int eaten = -1; 4292 bool fragstolen = false; 4293 4294 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4295 goto drop; 4296 4297 skb_dst_drop(skb); 4298 __skb_pull(skb, th->doff * 4); 4299 4300 TCP_ECN_accept_cwr(tp, skb); 4301 4302 tp->rx_opt.dsack = 0; 4303 4304 /* Queue data for delivery to the user. 4305 * Packets in sequence go to the receive queue. 4306 * Out of sequence packets to the out_of_order_queue. 4307 */ 4308 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4309 if (tcp_receive_window(tp) == 0) 4310 goto out_of_window; 4311 4312 /* Ok. In sequence. In window. */ 4313 if (tp->ucopy.task == current && 4314 tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 4315 sock_owned_by_user(sk) && !tp->urg_data) { 4316 int chunk = min_t(unsigned int, skb->len, 4317 tp->ucopy.len); 4318 4319 __set_current_state(TASK_RUNNING); 4320 4321 local_bh_enable(); 4322 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4323 tp->ucopy.len -= chunk; 4324 tp->copied_seq += chunk; 4325 eaten = (chunk == skb->len); 4326 tcp_rcv_space_adjust(sk); 4327 } 4328 local_bh_disable(); 4329 } 4330 4331 if (eaten <= 0) { 4332 queue_and_out: 4333 if (eaten < 0 && 4334 tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4335 goto drop; 4336 4337 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4338 } 4339 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4340 if (skb->len) 4341 tcp_event_data_recv(sk, skb); 4342 if (th->fin) 4343 tcp_fin(sk); 4344 4345 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4346 tcp_ofo_queue(sk); 4347 4348 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4349 * gap in queue is filled. 4350 */ 4351 if (skb_queue_empty(&tp->out_of_order_queue)) 4352 inet_csk(sk)->icsk_ack.pingpong = 0; 4353 } 4354 4355 if (tp->rx_opt.num_sacks) 4356 tcp_sack_remove(tp); 4357 4358 tcp_fast_path_check(sk); 4359 4360 if (eaten > 0) 4361 kfree_skb_partial(skb, fragstolen); 4362 if (!sock_flag(sk, SOCK_DEAD)) 4363 sk->sk_data_ready(sk, 0); 4364 return; 4365 } 4366 4367 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4368 /* A retransmit, 2nd most common case. Force an immediate ack. 
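 *
 * end_seq does not advance past rcv_nxt, so every byte of this
 * segment has been delivered already. Answer with a quick ACK
 * and, if SACK/D-SACK is enabled, a D-SACK block covering the
 * duplicate so the sender can tell the retransmit was spurious.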
*/ 4369 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4370 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4371 4372 out_of_window: 4373 tcp_enter_quickack_mode(sk); 4374 inet_csk_schedule_ack(sk); 4375 drop: 4376 __kfree_skb(skb); 4377 return; 4378 } 4379 4380 /* Out of window. F.e. zero window probe. */ 4381 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4382 goto out_of_window; 4383 4384 tcp_enter_quickack_mode(sk); 4385 4386 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4387 /* Partial packet, seq < rcv_next < end_seq */ 4388 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4389 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4390 TCP_SKB_CB(skb)->end_seq); 4391 4392 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4393 4394 /* If window is closed, drop tail of packet. But after 4395 * remembering D-SACK for its head made in previous line. 4396 */ 4397 if (!tcp_receive_window(tp)) 4398 goto out_of_window; 4399 goto queue_and_out; 4400 } 4401 4402 tcp_data_queue_ofo(sk, skb); 4403 } 4404 4405 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4406 struct sk_buff_head *list) 4407 { 4408 struct sk_buff *next = NULL; 4409 4410 if (!skb_queue_is_last(list, skb)) 4411 next = skb_queue_next(list, skb); 4412 4413 __skb_unlink(skb, list); 4414 __kfree_skb(skb); 4415 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4416 4417 return next; 4418 } 4419 4420 /* Collapse contiguous sequence of skbs head..tail with 4421 * sequence numbers start..end. 4422 * 4423 * If tail is NULL, this means until the end of the list. 4424 * 4425 * Segments with FIN/SYN are not collapsed (only because this 4426 * simplifies code) 4427 */ 4428 static void 4429 tcp_collapse(struct sock *sk, struct sk_buff_head *list, 4430 struct sk_buff *head, struct sk_buff *tail, 4431 u32 start, u32 end) 4432 { 4433 struct sk_buff *skb, *n; 4434 bool end_of_skbs; 4435 4436 /* First, check that queue is collapsible and find 4437 * the point where collapsing can be useful. */ 4438 skb = head; 4439 restart: 4440 end_of_skbs = true; 4441 skb_queue_walk_from_safe(list, skb, n) { 4442 if (skb == tail) 4443 break; 4444 /* No new bits? It is possible on ofo queue. */ 4445 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4446 skb = tcp_collapse_one(sk, skb, list); 4447 if (!skb) 4448 break; 4449 goto restart; 4450 } 4451 4452 /* The first skb to collapse is: 4453 * - not SYN/FIN and 4454 * - bloated or contains data before "start" or 4455 * overlaps to the next one. 4456 */ 4457 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4458 (tcp_win_from_space(skb->truesize) > skb->len || 4459 before(TCP_SKB_CB(skb)->seq, start))) { 4460 end_of_skbs = false; 4461 break; 4462 } 4463 4464 if (!skb_queue_is_last(list, skb)) { 4465 struct sk_buff *next = skb_queue_next(list, skb); 4466 if (next != tail && 4467 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { 4468 end_of_skbs = false; 4469 break; 4470 } 4471 } 4472 4473 /* Decided to skip this, advance start seq. */ 4474 start = TCP_SKB_CB(skb)->end_seq; 4475 } 4476 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4477 return; 4478 4479 while (before(start, end)) { 4480 struct sk_buff *nskb; 4481 unsigned int header = skb_headroom(skb); 4482 int copy = SKB_MAX_ORDER(header, 0); 4483 4484 /* Too big header? This can happen with IPv6. 
*/ 4485 if (copy < 0) 4486 return; 4487 if (end - start < copy) 4488 copy = end - start; 4489 nskb = alloc_skb(copy + header, GFP_ATOMIC); 4490 if (!nskb) 4491 return; 4492 4493 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 4494 skb_set_network_header(nskb, (skb_network_header(skb) - 4495 skb->head)); 4496 skb_set_transport_header(nskb, (skb_transport_header(skb) - 4497 skb->head)); 4498 skb_reserve(nskb, header); 4499 memcpy(nskb->head, skb->head, header); 4500 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4501 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4502 __skb_queue_before(list, skb, nskb); 4503 skb_set_owner_r(nskb, sk); 4504 4505 /* Copy data, releasing collapsed skbs. */ 4506 while (copy > 0) { 4507 int offset = start - TCP_SKB_CB(skb)->seq; 4508 int size = TCP_SKB_CB(skb)->end_seq - start; 4509 4510 BUG_ON(offset < 0); 4511 if (size > 0) { 4512 size = min(copy, size); 4513 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 4514 BUG(); 4515 TCP_SKB_CB(nskb)->end_seq += size; 4516 copy -= size; 4517 start += size; 4518 } 4519 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4520 skb = tcp_collapse_one(sk, skb, list); 4521 if (!skb || 4522 skb == tail || 4523 tcp_hdr(skb)->syn || 4524 tcp_hdr(skb)->fin) 4525 return; 4526 } 4527 } 4528 } 4529 } 4530 4531 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 4532 * and tcp_collapse() them until all the queue is collapsed. 4533 */ 4534 static void tcp_collapse_ofo_queue(struct sock *sk) 4535 { 4536 struct tcp_sock *tp = tcp_sk(sk); 4537 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 4538 struct sk_buff *head; 4539 u32 start, end; 4540 4541 if (skb == NULL) 4542 return; 4543 4544 start = TCP_SKB_CB(skb)->seq; 4545 end = TCP_SKB_CB(skb)->end_seq; 4546 head = skb; 4547 4548 for (;;) { 4549 struct sk_buff *next = NULL; 4550 4551 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) 4552 next = skb_queue_next(&tp->out_of_order_queue, skb); 4553 skb = next; 4554 4555 /* Segment is terminated when we see gap or when 4556 * we are at the end of all the queue. */ 4557 if (!skb || 4558 after(TCP_SKB_CB(skb)->seq, end) || 4559 before(TCP_SKB_CB(skb)->end_seq, start)) { 4560 tcp_collapse(sk, &tp->out_of_order_queue, 4561 head, skb, start, end); 4562 head = skb; 4563 if (!skb) 4564 break; 4565 /* Start new segment */ 4566 start = TCP_SKB_CB(skb)->seq; 4567 end = TCP_SKB_CB(skb)->end_seq; 4568 } else { 4569 if (before(TCP_SKB_CB(skb)->seq, start)) 4570 start = TCP_SKB_CB(skb)->seq; 4571 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4572 end = TCP_SKB_CB(skb)->end_seq; 4573 } 4574 } 4575 } 4576 4577 /* 4578 * Purge the out-of-order queue. 4579 * Return true if queue was pruned. 4580 */ 4581 static bool tcp_prune_ofo_queue(struct sock *sk) 4582 { 4583 struct tcp_sock *tp = tcp_sk(sk); 4584 bool res = false; 4585 4586 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4587 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); 4588 __skb_queue_purge(&tp->out_of_order_queue); 4589 4590 /* Reset SACK state. A conforming SACK implementation will 4591 * do the same at a timeout based retransmit. When a connection 4592 * is in a sad state like this, we care only about integrity 4593 * of the connection not performance. 4594 */ 4595 if (tp->rx_opt.sack_ok) 4596 tcp_sack_reset(&tp->rx_opt); 4597 sk_mem_reclaim(sk); 4598 res = true; 4599 } 4600 return res; 4601 } 4602 4603 /* Reduce allocated memory if we can, trying to get 4604 * the socket within its memory limits again. 
4605 * 4606 * Return less than zero if we should start dropping frames 4607 * until the socket owning process reads some of the data 4608 * to stabilize the situation. 4609 */ 4610 static int tcp_prune_queue(struct sock *sk) 4611 { 4612 struct tcp_sock *tp = tcp_sk(sk); 4613 4614 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 4615 4616 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); 4617 4618 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 4619 tcp_clamp_window(sk); 4620 else if (sk_under_memory_pressure(sk)) 4621 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4622 4623 tcp_collapse_ofo_queue(sk); 4624 if (!skb_queue_empty(&sk->sk_receive_queue)) 4625 tcp_collapse(sk, &sk->sk_receive_queue, 4626 skb_peek(&sk->sk_receive_queue), 4627 NULL, 4628 tp->copied_seq, tp->rcv_nxt); 4629 sk_mem_reclaim(sk); 4630 4631 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4632 return 0; 4633 4634 /* Collapsing did not help, destructive actions follow. 4635 * This must not ever occur. */ 4636 4637 tcp_prune_ofo_queue(sk); 4638 4639 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4640 return 0; 4641 4642 /* If we are really being abused, tell the caller to silently 4643 * drop receive data on the floor. It will get retransmitted 4644 * and hopefully then we'll have sufficient space. 4645 */ 4646 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); 4647 4648 /* Massive buffer overcommit. */ 4649 tp->pred_flags = 0; 4650 return -1; 4651 } 4652 4653 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 4654 * As additional protections, we do not touch cwnd in retransmission phases, 4655 * and if application hit its sndbuf limit recently. 4656 */ 4657 void tcp_cwnd_application_limited(struct sock *sk) 4658 { 4659 struct tcp_sock *tp = tcp_sk(sk); 4660 4661 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 4662 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 4663 /* Limited by application or receiver window. */ 4664 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 4665 u32 win_used = max(tp->snd_cwnd_used, init_win); 4666 if (win_used < tp->snd_cwnd) { 4667 tp->snd_ssthresh = tcp_current_ssthresh(sk); 4668 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 4669 } 4670 tp->snd_cwnd_used = 0; 4671 } 4672 tp->snd_cwnd_stamp = tcp_time_stamp; 4673 } 4674 4675 static bool tcp_should_expand_sndbuf(const struct sock *sk) 4676 { 4677 const struct tcp_sock *tp = tcp_sk(sk); 4678 4679 /* If the user specified a specific send buffer setting, do 4680 * not modify it. 4681 */ 4682 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 4683 return false; 4684 4685 /* If we are under global TCP memory pressure, do not expand. */ 4686 if (sk_under_memory_pressure(sk)) 4687 return false; 4688 4689 /* If we are under soft global TCP memory pressure, do not expand. */ 4690 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 4691 return false; 4692 4693 /* If we filled the congestion window, do not expand. */ 4694 if (tp->packets_out >= tp->snd_cwnd) 4695 return false; 4696 4697 return true; 4698 } 4699 4700 /* When incoming ACK allowed to free some skb from write_queue, 4701 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 4702 * on the exit from tcp input handler. 4703 * 4704 * PROBLEM: sndbuf expansion does not work well with largesend. 
4705 */ 4706 static void tcp_new_space(struct sock *sk) 4707 { 4708 struct tcp_sock *tp = tcp_sk(sk); 4709 4710 if (tcp_should_expand_sndbuf(sk)) { 4711 int sndmem = SKB_TRUESIZE(max_t(u32, 4712 tp->rx_opt.mss_clamp, 4713 tp->mss_cache) + 4714 MAX_TCP_HEADER); 4715 int demanded = max_t(unsigned int, tp->snd_cwnd, 4716 tp->reordering + 1); 4717 sndmem *= 2 * demanded; 4718 if (sndmem > sk->sk_sndbuf) 4719 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 4720 tp->snd_cwnd_stamp = tcp_time_stamp; 4721 } 4722 4723 sk->sk_write_space(sk); 4724 } 4725 4726 static void tcp_check_space(struct sock *sk) 4727 { 4728 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 4729 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 4730 if (sk->sk_socket && 4731 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 4732 tcp_new_space(sk); 4733 } 4734 } 4735 4736 static inline void tcp_data_snd_check(struct sock *sk) 4737 { 4738 tcp_push_pending_frames(sk); 4739 tcp_check_space(sk); 4740 } 4741 4742 /* 4743 * Check if sending an ack is needed. 4744 */ 4745 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 4746 { 4747 struct tcp_sock *tp = tcp_sk(sk); 4748 4749 /* More than one full frame received... */ 4750 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && 4751 /* ... and right edge of window advances far enough. 4752 * (tcp_recvmsg() will send ACK otherwise). Or... 4753 */ 4754 __tcp_select_window(sk) >= tp->rcv_wnd) || 4755 /* We ACK each frame or... */ 4756 tcp_in_quickack_mode(sk) || 4757 /* We have out of order data. */ 4758 (ofo_possible && skb_peek(&tp->out_of_order_queue))) { 4759 /* Then ack it now */ 4760 tcp_send_ack(sk); 4761 } else { 4762 /* Else, send delayed ack. */ 4763 tcp_send_delayed_ack(sk); 4764 } 4765 } 4766 4767 static inline void tcp_ack_snd_check(struct sock *sk) 4768 { 4769 if (!inet_csk_ack_scheduled(sk)) { 4770 /* We sent a data segment already. */ 4771 return; 4772 } 4773 __tcp_ack_snd_check(sk, 1); 4774 } 4775 4776 /* 4777 * This routine is only called when we have urgent data 4778 * signaled. Its the 'slow' part of tcp_urg. It could be 4779 * moved inline now as tcp_urg is only called from one 4780 * place. We handle URGent data wrong. We have to - as 4781 * BSD still doesn't use the correction from RFC961. 4782 * For 1003.1g we should support a new option TCP_STDURG to permit 4783 * either form (or just set the sysctl tcp_stdurg). 4784 */ 4785 4786 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) 4787 { 4788 struct tcp_sock *tp = tcp_sk(sk); 4789 u32 ptr = ntohs(th->urg_ptr); 4790 4791 if (ptr && !sysctl_tcp_stdurg) 4792 ptr--; 4793 ptr += ntohl(th->seq); 4794 4795 /* Ignore urgent data that we've already seen and read. */ 4796 if (after(tp->copied_seq, ptr)) 4797 return; 4798 4799 /* Do not replay urg ptr. 4800 * 4801 * NOTE: interesting situation not covered by specs. 4802 * Misbehaving sender may send urg ptr, pointing to segment, 4803 * which we already have in ofo queue. We are not able to fetch 4804 * such data and will stay in TCP_URG_NOTYET until will be eaten 4805 * by recvmsg(). Seems, we are not obliged to handle such wicked 4806 * situations. But it is worth to think about possibility of some 4807 * DoSes using some hypothetical application level deadlock. 4808 */ 4809 if (before(ptr, tp->rcv_nxt)) 4810 return; 4811 4812 /* Do we already have a newer (or duplicate) urgent pointer? */ 4813 if (tp->urg_data && !after(ptr, tp->urg_seq)) 4814 return; 4815 4816 /* Tell the world about our new urgent pointer. 
*/ 4817 sk_send_sigurg(sk); 4818 4819 /* We may be adding urgent data when the last byte read was 4820 * urgent. To do this requires some care. We cannot just ignore 4821 * tp->copied_seq since we would read the last urgent byte again 4822 * as data, nor can we alter copied_seq until this data arrives 4823 * or we break the semantics of SIOCATMARK (and thus sockatmark()) 4824 * 4825 * NOTE. Double Dutch. Rendering to plain English: author of comment 4826 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 4827 * and expect that both A and B disappear from stream. This is _wrong_. 4828 * Though this happens in BSD with high probability, this is occasional. 4829 * Any application relying on this is buggy. Note also, that fix "works" 4830 * only in this artificial test. Insert some normal data between A and B and we will 4831 * decline of BSD again. Verdict: it is better to remove to trap 4832 * buggy users. 4833 */ 4834 if (tp->urg_seq == tp->copied_seq && tp->urg_data && 4835 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 4836 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 4837 tp->copied_seq++; 4838 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 4839 __skb_unlink(skb, &sk->sk_receive_queue); 4840 __kfree_skb(skb); 4841 } 4842 } 4843 4844 tp->urg_data = TCP_URG_NOTYET; 4845 tp->urg_seq = ptr; 4846 4847 /* Disable header prediction. */ 4848 tp->pred_flags = 0; 4849 } 4850 4851 /* This is the 'fast' part of urgent handling. */ 4852 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) 4853 { 4854 struct tcp_sock *tp = tcp_sk(sk); 4855 4856 /* Check if we get a new urgent pointer - normally not. */ 4857 if (th->urg) 4858 tcp_check_urg(sk, th); 4859 4860 /* Do we wait for any urgent data? - normally not... */ 4861 if (tp->urg_data == TCP_URG_NOTYET) { 4862 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 4863 th->syn; 4864 4865 /* Is the urgent pointer pointing into this packet? 
*/ 4866 if (ptr < skb->len) { 4867 u8 tmp; 4868 if (skb_copy_bits(skb, ptr, &tmp, 1)) 4869 BUG(); 4870 tp->urg_data = TCP_URG_VALID | tmp; 4871 if (!sock_flag(sk, SOCK_DEAD)) 4872 sk->sk_data_ready(sk, 0); 4873 } 4874 } 4875 } 4876 4877 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 4878 { 4879 struct tcp_sock *tp = tcp_sk(sk); 4880 int chunk = skb->len - hlen; 4881 int err; 4882 4883 local_bh_enable(); 4884 if (skb_csum_unnecessary(skb)) 4885 err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); 4886 else 4887 err = skb_copy_and_csum_datagram_iovec(skb, hlen, 4888 tp->ucopy.iov); 4889 4890 if (!err) { 4891 tp->ucopy.len -= chunk; 4892 tp->copied_seq += chunk; 4893 tcp_rcv_space_adjust(sk); 4894 } 4895 4896 local_bh_disable(); 4897 return err; 4898 } 4899 4900 static __sum16 __tcp_checksum_complete_user(struct sock *sk, 4901 struct sk_buff *skb) 4902 { 4903 __sum16 result; 4904 4905 if (sock_owned_by_user(sk)) { 4906 local_bh_enable(); 4907 result = __tcp_checksum_complete(skb); 4908 local_bh_disable(); 4909 } else { 4910 result = __tcp_checksum_complete(skb); 4911 } 4912 return result; 4913 } 4914 4915 static inline bool tcp_checksum_complete_user(struct sock *sk, 4916 struct sk_buff *skb) 4917 { 4918 return !skb_csum_unnecessary(skb) && 4919 __tcp_checksum_complete_user(sk, skb); 4920 } 4921 4922 #ifdef CONFIG_NET_DMA 4923 static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, 4924 int hlen) 4925 { 4926 struct tcp_sock *tp = tcp_sk(sk); 4927 int chunk = skb->len - hlen; 4928 int dma_cookie; 4929 bool copied_early = false; 4930 4931 if (tp->ucopy.wakeup) 4932 return false; 4933 4934 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 4935 tp->ucopy.dma_chan = net_dma_find_channel(); 4936 4937 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 4938 4939 dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, 4940 skb, hlen, 4941 tp->ucopy.iov, chunk, 4942 tp->ucopy.pinned_list); 4943 4944 if (dma_cookie < 0) 4945 goto out; 4946 4947 tp->ucopy.dma_cookie = dma_cookie; 4948 copied_early = true; 4949 4950 tp->ucopy.len -= chunk; 4951 tp->copied_seq += chunk; 4952 tcp_rcv_space_adjust(sk); 4953 4954 if ((tp->ucopy.len == 0) || 4955 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 4956 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 4957 tp->ucopy.wakeup = 1; 4958 sk->sk_data_ready(sk, 0); 4959 } 4960 } else if (chunk > 0) { 4961 tp->ucopy.wakeup = 1; 4962 sk->sk_data_ready(sk, 0); 4963 } 4964 out: 4965 return copied_early; 4966 } 4967 #endif /* CONFIG_NET_DMA */ 4968 4969 /* Does PAWS and seqno based validation of an incoming segment, flags will 4970 * play significant role here. 4971 */ 4972 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 4973 const struct tcphdr *th, int syn_inerr) 4974 { 4975 struct tcp_sock *tp = tcp_sk(sk); 4976 4977 /* RFC1323: H1. Apply PAWS check first. */ 4978 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4979 tcp_paws_discard(sk, skb)) { 4980 if (!th->rst) { 4981 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 4982 tcp_send_dupack(sk, skb); 4983 goto discard; 4984 } 4985 /* Reset is accepted even if it did not pass PAWS. */ 4986 } 4987 4988 /* Step 1: check sequence number */ 4989 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 4990 /* RFC793, page 37: "In all states except SYN-SENT, all reset 4991 * (RST) segments are validated by checking their SEQ-fields." 
4992 * And page 69: "If an incoming segment is not acceptable, 4993 * an acknowledgment should be sent in reply (unless the RST 4994 * bit is set, if so drop the segment and return)". 4995 */ 4996 if (!th->rst) { 4997 if (th->syn) 4998 goto syn_challenge; 4999 tcp_send_dupack(sk, skb); 5000 } 5001 goto discard; 5002 } 5003 5004 /* Step 2: check RST bit */ 5005 if (th->rst) { 5006 /* RFC 5961 3.2 : 5007 * If sequence number exactly matches RCV.NXT, then 5008 * RESET the connection 5009 * else 5010 * Send a challenge ACK 5011 */ 5012 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) 5013 tcp_reset(sk); 5014 else 5015 tcp_send_challenge_ack(sk); 5016 goto discard; 5017 } 5018 5019 /* step 3: check security and precedence [ignored] */ 5020 5021 /* step 4: Check for a SYN 5022 * RFC 5691 4.2 : Send a challenge ack 5023 */ 5024 if (th->syn) { 5025 syn_challenge: 5026 if (syn_inerr) 5027 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5028 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); 5029 tcp_send_challenge_ack(sk); 5030 goto discard; 5031 } 5032 5033 return true; 5034 5035 discard: 5036 __kfree_skb(skb); 5037 return false; 5038 } 5039 5040 /* 5041 * TCP receive function for the ESTABLISHED state. 5042 * 5043 * It is split into a fast path and a slow path. The fast path is 5044 * disabled when: 5045 * - A zero window was announced from us - zero window probing 5046 * is only handled properly in the slow path. 5047 * - Out of order segments arrived. 5048 * - Urgent data is expected. 5049 * - There is no buffer space left 5050 * - Unexpected TCP flags/window values/header lengths are received 5051 * (detected by checking the TCP header against pred_flags) 5052 * - Data is sent in both directions. Fast path only supports pure senders 5053 * or pure receivers (this means either the sequence number or the ack 5054 * value must stay constant) 5055 * - Unexpected TCP option. 5056 * 5057 * When these conditions are not satisfied it drops into a standard 5058 * receive procedure patterned after RFC793 to handle all cases. 5059 * The first three cases are guaranteed by proper pred_flags setting, 5060 * the rest is checked inline. Fast processing is turned on in 5061 * tcp_data_queue when everything is OK. 5062 */ 5063 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5064 const struct tcphdr *th, unsigned int len) 5065 { 5066 struct tcp_sock *tp = tcp_sk(sk); 5067 5068 if (unlikely(sk->sk_rx_dst == NULL)) 5069 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); 5070 /* 5071 * Header prediction. 5072 * The code loosely follows the one in the famous 5073 * "30 instruction TCP receive" Van Jacobson mail. 5074 * 5075 * Van's trick is to deposit buffers into socket queue 5076 * on a device interrupt, to call tcp_recv function 5077 * on the receive process context and checksum and copy 5078 * the buffer to user space. smart... 5079 * 5080 * Our current scheme is not silly either but we take the 5081 * extra cost of the net_bh soft interrupt processing... 5082 * We do checksum and copy also but from device to kernel. 5083 */ 5084 5085 tp->rx_opt.saw_tstamp = 0; 5086 5087 /* pred_flags is 0xS?10 << 16 + snd_wnd 5088 * if header_prediction is to be made 5089 * 'S' will always be tp->tcp_header_len >> 2 5090 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 5091 * turn it off (when there are holes in the receive 5092 * space for instance) 5093 * PSH flag is ignored. 
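 *
 * For example, with timestamps in use tcp_header_len is
 * 20 + 12 = 32, 'S' is 8, and pred_flags is 0x8010 << 16 plus
 * snd_wnd: doff = 8, only the ACK bit set, and the expected
 * window in the low 16 bits.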
5094 */ 5095 5096 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 5097 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && 5098 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 5099 int tcp_header_len = tp->tcp_header_len; 5100 5101 /* Timestamp header prediction: tcp_header_len 5102 * is automatically equal to th->doff*4 due to pred_flags 5103 * match. 5104 */ 5105 5106 /* Check timestamp */ 5107 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 5108 /* No? Slow path! */ 5109 if (!tcp_parse_aligned_timestamp(tp, th)) 5110 goto slow_path; 5111 5112 /* If PAWS failed, check it more carefully in slow path */ 5113 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 5114 goto slow_path; 5115 5116 /* DO NOT update ts_recent here, if checksum fails 5117 * and timestamp was corrupted part, it will result 5118 * in a hung connection since we will drop all 5119 * future packets due to the PAWS test. 5120 */ 5121 } 5122 5123 if (len <= tcp_header_len) { 5124 /* Bulk data transfer: sender */ 5125 if (len == tcp_header_len) { 5126 /* Predicted packet is in window by definition. 5127 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5128 * Hence, check seq<=rcv_wup reduces to: 5129 */ 5130 if (tcp_header_len == 5131 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5132 tp->rcv_nxt == tp->rcv_wup) 5133 tcp_store_ts_recent(tp); 5134 5135 /* We know that such packets are checksummed 5136 * on entry. 5137 */ 5138 tcp_ack(sk, skb, 0); 5139 __kfree_skb(skb); 5140 tcp_data_snd_check(sk); 5141 return 0; 5142 } else { /* Header too small */ 5143 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5144 goto discard; 5145 } 5146 } else { 5147 int eaten = 0; 5148 int copied_early = 0; 5149 bool fragstolen = false; 5150 5151 if (tp->copied_seq == tp->rcv_nxt && 5152 len - tcp_header_len <= tp->ucopy.len) { 5153 #ifdef CONFIG_NET_DMA 5154 if (tp->ucopy.task == current && 5155 sock_owned_by_user(sk) && 5156 tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 5157 copied_early = 1; 5158 eaten = 1; 5159 } 5160 #endif 5161 if (tp->ucopy.task == current && 5162 sock_owned_by_user(sk) && !copied_early) { 5163 __set_current_state(TASK_RUNNING); 5164 5165 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 5166 eaten = 1; 5167 } 5168 if (eaten) { 5169 /* Predicted packet is in window by definition. 5170 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5171 * Hence, check seq<=rcv_wup reduces to: 5172 */ 5173 if (tcp_header_len == 5174 (sizeof(struct tcphdr) + 5175 TCPOLEN_TSTAMP_ALIGNED) && 5176 tp->rcv_nxt == tp->rcv_wup) 5177 tcp_store_ts_recent(tp); 5178 5179 tcp_rcv_rtt_measure_ts(sk, skb); 5180 5181 __skb_pull(skb, tcp_header_len); 5182 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 5183 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); 5184 } 5185 if (copied_early) 5186 tcp_cleanup_rbuf(sk, skb->len); 5187 } 5188 if (!eaten) { 5189 if (tcp_checksum_complete_user(sk, skb)) 5190 goto csum_error; 5191 5192 if ((int)skb->truesize > sk->sk_forward_alloc) 5193 goto step5; 5194 5195 /* Predicted packet is in window by definition. 5196 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
5197 * Hence, check seq<=rcv_wup reduces to: 5198 */ 5199 if (tcp_header_len == 5200 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5201 tp->rcv_nxt == tp->rcv_wup) 5202 tcp_store_ts_recent(tp); 5203 5204 tcp_rcv_rtt_measure_ts(sk, skb); 5205 5206 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5207 5208 /* Bulk data transfer: receiver */ 5209 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, 5210 &fragstolen); 5211 } 5212 5213 tcp_event_data_recv(sk, skb); 5214 5215 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 5216 /* Well, only one small jumplet in fast path... */ 5217 tcp_ack(sk, skb, FLAG_DATA); 5218 tcp_data_snd_check(sk); 5219 if (!inet_csk_ack_scheduled(sk)) 5220 goto no_ack; 5221 } 5222 5223 if (!copied_early || tp->rcv_nxt != tp->rcv_wup) 5224 __tcp_ack_snd_check(sk, 0); 5225 no_ack: 5226 #ifdef CONFIG_NET_DMA 5227 if (copied_early) 5228 __skb_queue_tail(&sk->sk_async_wait_queue, skb); 5229 else 5230 #endif 5231 if (eaten) 5232 kfree_skb_partial(skb, fragstolen); 5233 sk->sk_data_ready(sk, 0); 5234 return 0; 5235 } 5236 } 5237 5238 slow_path: 5239 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5240 goto csum_error; 5241 5242 if (!th->ack && !th->rst) 5243 goto discard; 5244 5245 /* 5246 * Standard slow path. 5247 */ 5248 5249 if (!tcp_validate_incoming(sk, skb, th, 1)) 5250 return 0; 5251 5252 step5: 5253 if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) 5254 goto discard; 5255 5256 /* ts_recent update must be made after we are sure that the packet 5257 * is in window. 5258 */ 5259 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 5260 5261 tcp_rcv_rtt_measure_ts(sk, skb); 5262 5263 /* Process urgent data. */ 5264 tcp_urg(sk, skb, th); 5265 5266 /* step 7: process the segment text */ 5267 tcp_data_queue(sk, skb); 5268 5269 tcp_data_snd_check(sk); 5270 tcp_ack_snd_check(sk); 5271 return 0; 5272 5273 csum_error: 5274 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5275 5276 discard: 5277 __kfree_skb(skb); 5278 return 0; 5279 } 5280 EXPORT_SYMBOL(tcp_rcv_established); 5281 5282 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) 5283 { 5284 struct tcp_sock *tp = tcp_sk(sk); 5285 struct inet_connection_sock *icsk = inet_csk(sk); 5286 5287 tcp_set_state(sk, TCP_ESTABLISHED); 5288 5289 if (skb != NULL) { 5290 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5291 security_inet_conn_established(sk, skb); 5292 } 5293 5294 /* Make sure socket is routed, for correct metrics. */ 5295 icsk->icsk_af_ops->rebuild_header(sk); 5296 5297 tcp_init_metrics(sk); 5298 5299 tcp_init_congestion_control(sk); 5300 5301 /* Prevent spurious tcp_cwnd_restart() on first data 5302 * packet. 5303 */ 5304 tp->lsndtime = tcp_time_stamp; 5305 5306 tcp_init_buffer_space(sk); 5307 5308 if (sock_flag(sk, SOCK_KEEPOPEN)) 5309 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 5310 5311 if (!tp->rx_opt.snd_wscale) 5312 __tcp_fast_path_on(tp, tp->snd_wnd); 5313 else 5314 tp->pred_flags = 0; 5315 5316 if (!sock_flag(sk, SOCK_DEAD)) { 5317 sk->sk_state_change(sk); 5318 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 5319 } 5320 } 5321 5322 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, 5323 struct tcp_fastopen_cookie *cookie) 5324 { 5325 struct tcp_sock *tp = tcp_sk(sk); 5326 struct sk_buff *data = tp->syn_data ? 
tcp_write_queue_head(sk) : NULL; 5327 u16 mss = tp->rx_opt.mss_clamp; 5328 bool syn_drop; 5329 5330 if (mss == tp->rx_opt.user_mss) { 5331 struct tcp_options_received opt; 5332 5333 /* Get original SYNACK MSS value if user MSS sets mss_clamp */ 5334 tcp_clear_options(&opt); 5335 opt.user_mss = opt.mss_clamp = 0; 5336 tcp_parse_options(synack, &opt, 0, NULL); 5337 mss = opt.mss_clamp; 5338 } 5339 5340 if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */ 5341 cookie->len = -1; 5342 5343 /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably 5344 * the remote receives only the retransmitted (regular) SYNs: either 5345 * the original SYN-data or the corresponding SYN-ACK is lost. 5346 */ 5347 syn_drop = (cookie->len <= 0 && data && tp->total_retrans); 5348 5349 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop); 5350 5351 if (data) { /* Retransmit unacked data in SYN */ 5352 tcp_for_write_queue_from(data, sk) { 5353 if (data == tcp_send_head(sk) || 5354 __tcp_retransmit_skb(sk, data)) 5355 break; 5356 } 5357 tcp_rearm_rto(sk); 5358 return true; 5359 } 5360 tp->syn_data_acked = tp->syn_data; 5361 return false; 5362 } 5363 5364 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5365 const struct tcphdr *th, unsigned int len) 5366 { 5367 struct inet_connection_sock *icsk = inet_csk(sk); 5368 struct tcp_sock *tp = tcp_sk(sk); 5369 struct tcp_fastopen_cookie foc = { .len = -1 }; 5370 int saved_clamp = tp->rx_opt.mss_clamp; 5371 5372 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5373 if (tp->rx_opt.saw_tstamp) 5374 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5375 5376 if (th->ack) { 5377 /* rfc793: 5378 * "If the state is SYN-SENT then 5379 * first check the ACK bit 5380 * If the ACK bit is set 5381 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 5382 * a reset (unless the RST bit is set, if so drop 5383 * the segment and return)" 5384 */ 5385 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || 5386 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) 5387 goto reset_and_undo; 5388 5389 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 5390 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 5391 tcp_time_stamp)) { 5392 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); 5393 goto reset_and_undo; 5394 } 5395 5396 /* Now ACK is acceptable. 5397 * 5398 * "If the RST bit is set 5399 * If the ACK was acceptable then signal the user "error: 5400 * connection reset", drop the segment, enter CLOSED state, 5401 * delete TCB, and return." 5402 */ 5403 5404 if (th->rst) { 5405 tcp_reset(sk); 5406 goto discard; 5407 } 5408 5409 /* rfc793: 5410 * "fifth, if neither of the SYN or RST bits is set then 5411 * drop the segment and return." 5412 * 5413 * See note below! 5414 * --ANK(990513) 5415 */ 5416 if (!th->syn) 5417 goto discard_and_undo; 5418 5419 /* rfc793: 5420 * "If the SYN bit is on ... 5421 * are acceptable then ... 5422 * (our SYN has been ACKed), change the connection 5423 * state to ESTABLISHED..." 5424 */ 5425 5426 TCP_ECN_rcv_synack(tp, th); 5427 5428 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 5429 tcp_ack(sk, skb, FLAG_SLOWPATH); 5430 5431 /* Ok.. it's good. Set up sequence numbers and 5432 * move to established. 5433 */ 5434 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5435 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5436 5437 /* RFC1323: The window in SYN & SYN/ACK segments is 5438 * never scaled. 
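 *
 * So the 16-bit value is taken verbatim here; snd_wscale is only
 * applied to windows carried by later segments, and only once
 * both ends have agreed on window scaling.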
5439 */ 5440 tp->snd_wnd = ntohs(th->window); 5441 5442 if (!tp->rx_opt.wscale_ok) { 5443 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 5444 tp->window_clamp = min(tp->window_clamp, 65535U); 5445 } 5446 5447 if (tp->rx_opt.saw_tstamp) { 5448 tp->rx_opt.tstamp_ok = 1; 5449 tp->tcp_header_len = 5450 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5451 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5452 tcp_store_ts_recent(tp); 5453 } else { 5454 tp->tcp_header_len = sizeof(struct tcphdr); 5455 } 5456 5457 if (tcp_is_sack(tp) && sysctl_tcp_fack) 5458 tcp_enable_fack(tp); 5459 5460 tcp_mtup_init(sk); 5461 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5462 tcp_initialize_rcv_mss(sk); 5463 5464 /* Remember, tcp_poll() does not lock socket! 5465 * Change state from SYN-SENT only after copied_seq 5466 * is initialized. */ 5467 tp->copied_seq = tp->rcv_nxt; 5468 5469 smp_mb(); 5470 5471 tcp_finish_connect(sk, skb); 5472 5473 if ((tp->syn_fastopen || tp->syn_data) && 5474 tcp_rcv_fastopen_synack(sk, skb, &foc)) 5475 return -1; 5476 5477 if (sk->sk_write_pending || 5478 icsk->icsk_accept_queue.rskq_defer_accept || 5479 icsk->icsk_ack.pingpong) { 5480 /* Save one ACK. Data will be ready after 5481 * several ticks, if write_pending is set. 5482 * 5483 * It may be deleted, but with this feature tcpdumps 5484 * look so _wonderfully_ clever, that I was not able 5485 * to stand against the temptation 8) --ANK 5486 */ 5487 inet_csk_schedule_ack(sk); 5488 icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5489 tcp_enter_quickack_mode(sk); 5490 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5491 TCP_DELACK_MAX, TCP_RTO_MAX); 5492 5493 discard: 5494 __kfree_skb(skb); 5495 return 0; 5496 } else { 5497 tcp_send_ack(sk); 5498 } 5499 return -1; 5500 } 5501 5502 /* No ACK in the segment */ 5503 5504 if (th->rst) { 5505 /* rfc793: 5506 * "If the RST bit is set 5507 * 5508 * Otherwise (no ACK) drop the segment and return." 5509 */ 5510 5511 goto discard_and_undo; 5512 } 5513 5514 /* PAWS check. */ 5515 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 5516 tcp_paws_reject(&tp->rx_opt, 0)) 5517 goto discard_and_undo; 5518 5519 if (th->syn) { 5520 /* We see SYN without ACK. It is attempt of 5521 * simultaneous connect with crossed SYNs. 5522 * Particularly, it can be connect to self. 5523 */ 5524 tcp_set_state(sk, TCP_SYN_RECV); 5525 5526 if (tp->rx_opt.saw_tstamp) { 5527 tp->rx_opt.tstamp_ok = 1; 5528 tcp_store_ts_recent(tp); 5529 tp->tcp_header_len = 5530 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5531 } else { 5532 tp->tcp_header_len = sizeof(struct tcphdr); 5533 } 5534 5535 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5536 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5537 5538 /* RFC1323: The window in SYN & SYN/ACK segments is 5539 * never scaled. 5540 */ 5541 tp->snd_wnd = ntohs(th->window); 5542 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5543 tp->max_window = tp->snd_wnd; 5544 5545 TCP_ECN_rcv_syn(tp, th); 5546 5547 tcp_mtup_init(sk); 5548 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5549 tcp_initialize_rcv_mss(sk); 5550 5551 tcp_send_synack(sk); 5552 #if 0 5553 /* Note, we could accept data and URG from this segment. 5554 * There are no obstacles to make this (except that we must 5555 * either change tcp_recvmsg() to prevent it from returning data 5556 * before 3WHS completes per RFC793, or employ TCP Fast Open). 5557 * 5558 * However, if we ignore data in ACKless segments sometimes, 5559 * we have no reasons to accept it sometimes. 5560 * Also, seems the code doing it in step6 of tcp_rcv_state_process 5561 * is not flawless. 
	if (th->syn) {
		/* We see a SYN without ACK. It is an attempt at a
		 * simultaneous connect with crossed SYNs.
		 * In particular, it can be a connect to self.
		 */
		tcp_set_state(sk, TCP_SYN_RECV);

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tcp_store_ts_recent(tp);
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);
		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
		tp->max_window = tp->snd_wnd;

		TCP_ECN_rcv_syn(tp, th);

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		tcp_send_synack(sk);
#if 0
		/* Note, we could accept data and URG from this segment.
		 * There are no obstacles to making this work (except that we
		 * must either change tcp_recvmsg() to prevent it from
		 * returning data before the 3WHS completes per RFC793, or
		 * employ TCP Fast Open).
		 *
		 * However, since we sometimes ignore data in ACKless segments,
		 * we have no reason to accept it at other times.
		 * Also, the code doing this in step6 of tcp_rcv_state_process
		 * does not seem flawless. So, discard the packet for sanity.
		 * Uncomment this return to process the data.
		 */
		return -1;
#else
		goto discard;
#endif
	}
	/* "fifth, if neither of the SYN or RST bits is set then
	 * drop the segment and return."
	 */

discard_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	goto discard;

reset_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	return 1;
}

/*
 * This function implements the receiving procedure of RFC 793 for
 * all states except ESTABLISHED and TIME_WAIT.
 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 * address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  const struct tcphdr *th, unsigned int len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	int queued = 0;

	tp->rx_opt.saw_tstamp = 0;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		goto discard;

	case TCP_LISTEN:
		if (th->ack)
			return 1;

		if (th->rst)
			goto discard;

		if (th->syn) {
			if (th->fin)
				goto discard;
			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
				return 1;

			/* Now we have several options: in theory there is
			 * nothing else in the frame. KA9Q has an option to
			 * send data with the syn, BSD accepts data with the
			 * syn up to the [to be] advertised window and
			 * Solaris 2.1 gives you a protocol error. For now
			 * we just ignore it; that fits the spec precisely
			 * and avoids incompatibilities. It would be nice in
			 * the future to drop through and process the data.
			 *
			 * Now that TTCP is starting to be used we ought to
			 * queue this data.
			 * But, this leaves one open to an easy denial of
			 * service attack, and SYN cookies can't defend
			 * against this problem. So, we drop the data
			 * in the interest of security over speed, unless
			 * it's still in use.
			 */
			kfree_skb(skb);
			return 0;
		}
		goto discard;

	case TCP_SYN_SENT:
		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
		if (queued >= 0)
			return queued;

		/* Do step6 onward by hand. */
		tcp_urg(sk, skb, th);
		__kfree_skb(skb);
		tcp_data_snd_check(sk);
		return 0;
	}

	req = tp->fastopen_rsk;
	if (req != NULL) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);

		if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
			goto discard;
	}

	if (!th->ack && !th->rst)
		goto discard;

	if (!tcp_validate_incoming(sk, skb, th, 0))
		return 0;

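	/* tcp_validate_incoming() above has already covered RFC 793
	 * "SEGMENT ARRIVES" steps 1-4 for these states: the sequence number
	 * (and PAWS) check, the RST bit, the (ignored) security and
	 * precedence check, and the SYN bit.  Steps 5-7 (ACK, URG and the
	 * segment text) are handled inline below.
	 */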
5685 */ 5686 icsk->icsk_af_ops->rebuild_header(sk); 5687 tcp_init_congestion_control(sk); 5688 5689 tcp_mtup_init(sk); 5690 tcp_init_buffer_space(sk); 5691 tp->copied_seq = tp->rcv_nxt; 5692 } 5693 smp_mb(); 5694 tcp_set_state(sk, TCP_ESTABLISHED); 5695 sk->sk_state_change(sk); 5696 5697 /* Note, that this wakeup is only for marginal 5698 * crossed SYN case. Passively open sockets 5699 * are not waked up, because sk->sk_sleep == 5700 * NULL and sk->sk_socket == NULL. 5701 */ 5702 if (sk->sk_socket) 5703 sk_wake_async(sk, 5704 SOCK_WAKE_IO, POLL_OUT); 5705 5706 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 5707 tp->snd_wnd = ntohs(th->window) << 5708 tp->rx_opt.snd_wscale; 5709 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 5710 5711 if (tp->rx_opt.tstamp_ok) 5712 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5713 5714 if (req) { 5715 /* Re-arm the timer because data may 5716 * have been sent out. This is similar 5717 * to the regular data transmission case 5718 * when new data has just been ack'ed. 5719 * 5720 * (TFO) - we could try to be more 5721 * aggressive and retranmitting any data 5722 * sooner based on when they were sent 5723 * out. 5724 */ 5725 tcp_rearm_rto(sk); 5726 } else 5727 tcp_init_metrics(sk); 5728 5729 /* Prevent spurious tcp_cwnd_restart() on 5730 * first data packet. 5731 */ 5732 tp->lsndtime = tcp_time_stamp; 5733 5734 tcp_initialize_rcv_mss(sk); 5735 tcp_fast_path_on(tp); 5736 } else { 5737 return 1; 5738 } 5739 break; 5740 5741 case TCP_FIN_WAIT1: 5742 /* If we enter the TCP_FIN_WAIT1 state and we are a 5743 * Fast Open socket and this is the first acceptable 5744 * ACK we have received, this would have acknowledged 5745 * our SYNACK so stop the SYNACK timer. 5746 */ 5747 if (req != NULL) { 5748 /* Return RST if ack_seq is invalid. 5749 * Note that RFC793 only says to generate a 5750 * DUPACK for it but for TCP Fast Open it seems 5751 * better to treat this case like TCP_SYN_RECV 5752 * above. 5753 */ 5754 if (!acceptable) 5755 return 1; 5756 /* We no longer need the request sock. */ 5757 reqsk_fastopen_remove(sk, req, false); 5758 tcp_rearm_rto(sk); 5759 } 5760 if (tp->snd_una == tp->write_seq) { 5761 struct dst_entry *dst; 5762 5763 tcp_set_state(sk, TCP_FIN_WAIT2); 5764 sk->sk_shutdown |= SEND_SHUTDOWN; 5765 5766 dst = __sk_dst_get(sk); 5767 if (dst) 5768 dst_confirm(dst); 5769 5770 if (!sock_flag(sk, SOCK_DEAD)) 5771 /* Wake up lingering close() */ 5772 sk->sk_state_change(sk); 5773 else { 5774 int tmo; 5775 5776 if (tp->linger2 < 0 || 5777 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5778 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 5779 tcp_done(sk); 5780 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 5781 return 1; 5782 } 5783 5784 tmo = tcp_fin_time(sk); 5785 if (tmo > TCP_TIMEWAIT_LEN) { 5786 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 5787 } else if (th->fin || sock_owned_by_user(sk)) { 5788 /* Bad case. We could lose such FIN otherwise. 5789 * It is not a big problem, but it looks confusing 5790 * and not so rare event. We still can lose it now, 5791 * if it spins in bh_lock_sock(), but it is really 5792 * marginal case. 
5793 */ 5794 inet_csk_reset_keepalive_timer(sk, tmo); 5795 } else { 5796 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 5797 goto discard; 5798 } 5799 } 5800 } 5801 break; 5802 5803 case TCP_CLOSING: 5804 if (tp->snd_una == tp->write_seq) { 5805 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 5806 goto discard; 5807 } 5808 break; 5809 5810 case TCP_LAST_ACK: 5811 if (tp->snd_una == tp->write_seq) { 5812 tcp_update_metrics(sk); 5813 tcp_done(sk); 5814 goto discard; 5815 } 5816 break; 5817 } 5818 } 5819 5820 /* ts_recent update must be made after we are sure that the packet 5821 * is in window. 5822 */ 5823 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 5824 5825 /* step 6: check the URG bit */ 5826 tcp_urg(sk, skb, th); 5827 5828 /* step 7: process the segment text */ 5829 switch (sk->sk_state) { 5830 case TCP_CLOSE_WAIT: 5831 case TCP_CLOSING: 5832 case TCP_LAST_ACK: 5833 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 5834 break; 5835 case TCP_FIN_WAIT1: 5836 case TCP_FIN_WAIT2: 5837 /* RFC 793 says to queue data in these states, 5838 * RFC 1122 says we MUST send a reset. 5839 * BSD 4.4 also does reset. 5840 */ 5841 if (sk->sk_shutdown & RCV_SHUTDOWN) { 5842 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5843 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 5844 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 5845 tcp_reset(sk); 5846 return 1; 5847 } 5848 } 5849 /* Fall through */ 5850 case TCP_ESTABLISHED: 5851 tcp_data_queue(sk, skb); 5852 queued = 1; 5853 break; 5854 } 5855 5856 /* tcp_data could move socket to TIME-WAIT */ 5857 if (sk->sk_state != TCP_CLOSE) { 5858 tcp_data_snd_check(sk); 5859 tcp_ack_snd_check(sk); 5860 } 5861 5862 if (!queued) { 5863 discard: 5864 __kfree_skb(skb); 5865 } 5866 return 0; 5867 } 5868 EXPORT_SYMBOL(tcp_rcv_state_process); 5869