/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

int sysctl_tcp_mtu_probing = 0;
int sysctl_tcp_base_mss = 512;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle = 1;

static void update_send_head(struct sock *sk, struct tcp_sock *tp,
			     struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
 * This is the first part of the cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window. */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to value enough for senders,
	 * following RFC2414. Senders, not following this RFC,
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}
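
/* Illustrative note (added commentary, not from the original source):
 * with timestamps enabled, the first option word written above packs
 * NOP (1), NOP (1), TCPOPT_TIMESTAMP (8) and TCPOLEN_TIMESTAMP (10)
 * into a single 32-bit word, i.e. __constant_htonl(0x0101080a), followed
 * by the 32-bit TSval and TSecr words.  That is why a timestamp option
 * costs TCPOLEN_TSTAMP_ALIGNED (12) bytes of header space per segment.
 */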

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets, should timestamps be in use, must be included
	 * in the MSS we advertise.  But we subtract them from
	 * tp->mss_cache so that calculations in tcp_sendmsg are
	 * simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->rtt_sample)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
	skb->h.th = th;
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th->source		= inet->sport;
	th->dest		= inet->dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__u16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(tp->rcv_wnd);
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr		= htons(tp->snd_up-tcb->seq);
		th->urg			= 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__u32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent);
	} else {
		tcp_build_and_update_options((__u32 *)(th + 1),
					     tp, tcb->when);
		TCP_ECN_send(sk, tp, skb, tcp_header_size);
	}

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	/* NET_XMIT_CN is special.  It does not guarantee that this
	 * packet is lost.  It tells us that the device is about to
	 * start dropping packets, or already drops some packets of
	 * the same priority, and asks us to send less aggressively.
	 */
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->gso_segs = factor;
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk_charge_skb(sk, buff);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' of skbs
	 * which it has never sent before.
	 *							--ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			/* Adjust Reno SACK estimate. */
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that the pulled data is not copied,
 * but immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
   for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function.		--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
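
/* Worked example (added commentary with typical IPv4/Ethernet numbers,
 * not from the original source): with a PMTU of 1500 and no extension
 * headers, tcp_mtu_to_mss() yields 1500 - 20 - 20 = 1460.  If timestamps
 * are in use, tp->tcp_header_len is sizeof(struct tcphdr) +
 * TCPOLEN_TSTAMP_ALIGNED, so another 12 bytes are subtracted and
 * tp->mss_cache becomes 1448.  tcp_mss_to_mtu() simply reverses the sum:
 * 1448 + 32 + 0 + 20 = 1500.
 */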

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk_charge_skb(sk, buff);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= 65536)
		return 0;

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer.  */
	return 1;
}
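
/* Illustrative example (added commentary with hypothetical numbers, not
 * from the original source): with mss_cache = 1448, snd_cwnd = 20,
 * in_flight = 8 and a 64 KB send window, cong_win is 12 * 1448 = 17376,
 * so limit = 17376 and the full-size test above does not fire.  With the
 * default tcp_tso_win_divisor of 3, chunk = min(snd_wnd, 20 * 1448) / 3
 * = 9653 (integer division); since limit >= chunk, we do not defer and
 * send the TSO frame now.
 */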

/* Create a new MTU probe if we are ready.
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	unsigned int pif;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.eff_sacks)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk, 0);
	probe_size = 2*tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	len = 0;
	if ((skb = sk->sk_send_head) == NULL)
		return -1;
	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
		skb = skb->next;
	if (len < probe_size)
		return -1;

	/* Receive window check. */
	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
		if (tp->snd_wnd < probe_size)
			return -1;
		else
			return 0;
	}

	/* Do we need to wait to drain cwnd? */
	pif = tcp_packets_in_flight(tp);
	if (pif + 2 > tp->snd_cwnd) {
		/* With no packets in flight, don't stall. */
		if (pif == 0)
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk_charge_skb(sk, nskb);

	skb = sk->sk_send_head;
	__skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
	sk->sk_send_head = nskb;

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		nskb->ip_summed = CHECKSUM_HW;

	len = 0;
	while (len < probe_size) {
		next = skb->next;

		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
					skb_put(nskb, copy), copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			__skb_unlink(skb, &sk->sk_write_queue);
			sk_stream_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_HW)
					skb->csum = csum_partial(skb->data, skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;
		skb = next;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit().
	 */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		update_send_head(sk, tp, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}


/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;

	/* Do MTU probing. */
	if ((result = tcp_mtu_probe(sk)) == 0) {
		return 0;
	} else if (result > 0) {
		sent_pkts = 1;
	}

	while ((skb = sk->sk_send_head)) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, tp, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, tp, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk, tp);
		return 0;
	}
	return !tp->packets_out && sk->sk_send_head;
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
			       unsigned int cur_mss, int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
}

/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to set up the probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = sk->sk_send_head;
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
			update_send_head(sk, tp, skb);
			tcp_cwnd_validate(sk, tp);
			return;
		}
	}
}
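
/* Illustrative note (added commentary with hypothetical numbers, not from
 * the original source): if a TSO skb holds 10000 bytes, mss_now is 1448
 * and tcp_window_allows() returns 20000, the limit computation used above
 * (and in tcp_write_xmit()) keeps the send limit at a multiple of mss_now
 * below skb->len: trim = 10000 % 1448 = 1312, so limit = 8688 and
 * tso_fragment() splits off the final partial segment for a later send.
 */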

/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RECV.NEXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria.  The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *	If the free space is less than 1/4 of the maximum
 *	space available and the free space is less than 1/2 mss,
 *	then set the window to 0.
 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *	Otherwise, just prevent the window from shrinking
 *	and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue. It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account.
 */
u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data.  Previous versions used mss_clamp
	 * here.  I don't know if the value based on our guesses
	 * of peer's MSS is better for the performance.  It's more correct
	 * but may be worse for the performance because of rcv_mss
	 * fluctuations.  --SAW  1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	if (mss > full_space)
		mss = full_space;

	if (free_space < full_space/2) {
		icsk->icsk_ack.quick = 0;

		if (tcp_memory_pressure)
			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Important case: prevent zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space/mss)*mss;
	}

	return window;
}

/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = skb->next;

	/* The first test we must make is that neither of these two
	 * SKB's are still referenced by someone else.
	 */
	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
		int skb_size = skb->len, next_skb_size = next_skb->len;
		u16 flags = TCP_SKB_CB(skb)->flags;

		/* Also punt if next skb has been SACK'd. */
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
			return;

		/* Next skb is out of window. */
		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
			return;

		/* Punt if not enough space exists in the first SKB for
		 * the data in the second, or the total combined payload
		 * would exceed the MSS.
		 */
		if ((next_skb_size > skb_tailroom(skb)) ||
		    ((skb_size + next_skb_size) > mss_now))
			return;

		BUG_ON(tcp_skb_pcount(skb) != 1 ||
		       tcp_skb_pcount(next_skb) != 1);

		/* changing transmit queue under us so clear hints */
		clear_all_retrans_hints(tp);

		/* Ok.  We will be able to collapse the packet. */
		__skb_unlink(next_skb, &sk->sk_write_queue);

		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);

		if (next_skb->ip_summed == CHECKSUM_HW)
			skb->ip_summed = CHECKSUM_HW;

		if (skb->ip_summed != CHECKSUM_HW)
			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

		/* Update sequence range on original skb. */
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

		/* Merge over control information. */
		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
		TCP_SKB_CB(skb)->flags = flags;

		/* All done, get rid of second SKB and account for it so
		 * packet counting does not break.
		 */
		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
			tp->retrans_out -= tcp_skb_pcount(next_skb);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
			tp->lost_out -= tcp_skb_pcount(next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}
		/* Reno case is special. Sigh... */
		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}

		/* Not quite right: it can be > snd.fack, but
		 * it is better to underestimate fackets.
		 */
		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
		tcp_packets_out_dec(tp, next_skb);
		sk_stream_free_skb(sk, next_skb);
	}
}

/* Do a simple retransmit without using the backoff mechanisms in
 * tcp_timer. This is used for path mtu discovery.
 * The socket is already locked here.
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk, 0);
	int lost = 0;

	sk_stream_for_retrans_queue(skb, sk) {
		if (skb->len > mss &&
		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				lost = 1;
			}
		}
	}

	clear_all_retrans_hints(tp);

	if (!lost)
		return;

	tcp_sync_left_out(tp);

	/* Don't muck with the congestion window here.
	 * Reason is that we do not increase the amount of _data_
	 * in the network, but the units changed and effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}

/* This retransmits one SKB.  Policy decisions and retransmit queue
 * state updates are done by the caller.  Returns non-zero if an
 * error occurred which prevented the send.
 */
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int cur_mss = tcp_current_mss(sk, 0);
	int err;

	/* Inconclusive MTU probe */
	if (icsk->icsk_mtup.probe_size) {
		icsk->icsk_mtup.probe_size = 0;
	}

	/* Do not send more than we queued. 1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (atomic_read(&sk->sk_wmem_alloc) >
	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
		return -EAGAIN;

	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	if (skb->len > cur_mss) {
		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
	}

	/* Collapse two adjacent packets if worthwhile and we can. */
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
	    (skb->len < (cur_mss >> 1)) &&
	    (skb->next != sk->sk_send_head) &&
	    (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
	    (sysctl_tcp_retrans_collapse != 0))
		tcp_retrans_try_collapse(sk, skb, cur_mss);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* Some Solaris stacks overoptimize and ignore the FIN on a
	 * retransmit when old data is attached.  So strip it off
	 * since it is cheap to do so and saves bytes on the network.
	 */
	if (skb->len > 0 &&
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
			skb_shinfo(skb)->gso_segs = 1;
			skb_shinfo(skb)->gso_size = 0;
			skb_shinfo(skb)->gso_type = 0;
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
		}
	}

	/* Make a copy, if the first transmission SKB clone we made
	 * is still in somebody's hands, else make a clone.
	 */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;

	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);

	if (err == 0) {
		/* Update global TCP statistics. */
		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);

		tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
			if (net_ratelimit())
				printk(KERN_DEBUG "retrans_out leaked.\n");
		}
#endif
		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
		tp->retrans_out += tcp_skb_pcount(skb);

		/* Save stamp of the first retransmit. */
		if (!tp->retrans_stamp)
			tp->retrans_stamp = TCP_SKB_CB(skb)->when;

		tp->undo_retrans++;

		/* snd_nxt is stored to detect loss of retransmitted segment,
		 * see tcp_input.c tcp_sacktag_write_queue().
		 */
		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
	}
	return err;
}

/* This gets called after a retransmit timeout, and the initially
 * retransmitted data is acknowledged.  It tries to continue
 * resending the rest of the retransmit queue, until either
 * we've sent it all or the congestion window limit is reached.
 * If doing SACK, the first ACK which comes back for a timeout
 * based retransmit packet might feed us FACK information again.
 * If so, we use it to avoid unnecessary retransmissions.
 */
void tcp_xmit_retransmit_queue(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int packet_cnt;

	if (tp->retransmit_skb_hint) {
		skb = tp->retransmit_skb_hint;
		packet_cnt = tp->retransmit_cnt_hint;
	} else {
		skb = sk->sk_write_queue.next;
		packet_cnt = 0;
	}

	/* First pass: retransmit lost packets. */
	if (tp->lost_out) {
		sk_stream_for_retrans_queue_from(skb, sk) {
			__u8 sacked = TCP_SKB_CB(skb)->sacked;

			/* we could do better than to assign each time */
			tp->retransmit_skb_hint = skb;
			tp->retransmit_cnt_hint = packet_cnt;

			/* Assume this retransmit will generate
			 * only one packet for congestion window
			 * calculation purposes.  This works because
			 * tcp_retransmit_skb() will chop up the
			 * packet to be MSS sized and all the
			 * packet counting works out.
			 */
			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
				return;

			if (sacked & TCPCB_LOST) {
				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
					if (tcp_retransmit_skb(sk, skb)) {
						tp->retransmit_skb_hint = NULL;
						return;
					}
					if (icsk->icsk_ca_state != TCP_CA_Loss)
						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
					else
						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);

					if (skb ==
					    skb_peek(&sk->sk_write_queue))
						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
									  inet_csk(sk)->icsk_rto,
									  TCP_RTO_MAX);
				}

				packet_cnt += tcp_skb_pcount(skb);
				if (packet_cnt >= tp->lost_out)
					break;
			}
		}
	}

	/* OK, demanded retransmission is finished. */

	/* Forward retransmissions are possible only during Recovery. */
	if (icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	/* No forward retransmissions in Reno are possible. */
	if (!tp->rx_opt.sack_ok)
		return;

	/* Yeah, we have to make a difficult choice between forward transmission
	 * and retransmission... Both ways have their merits...
	 *
	 * For now we do not retransmit anything, while we have some new
	 * segments to send.
	 */

	if (tcp_may_send_now(sk, tp))
		return;

	if (tp->forward_skb_hint) {
		skb = tp->forward_skb_hint;
		packet_cnt = tp->forward_cnt_hint;
	} else {
		skb = sk->sk_write_queue.next;
		packet_cnt = 0;
	}

	sk_stream_for_retrans_queue_from(skb, sk) {
		tp->forward_cnt_hint = packet_cnt;
		tp->forward_skb_hint = skb;

		/* Similar to the retransmit loop above we
		 * can pretend that the retransmitted SKB
		 * we send out here will be composed of one
		 * real MSS sized packet because tcp_retransmit_skb()
		 * will fragment it if necessary.
		 */
		 */
		if (++packet_cnt > tp->fackets_out)
			break;

		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
			break;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
			continue;

		/* Ok, retransmit it. */
		if (tcp_retransmit_skb(sk, skb)) {
			tp->forward_skb_hint = NULL;
			break;
		}

		if (skb == skb_peek(&sk->sk_write_queue))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  inet_csk(sk)->icsk_rto,
						  TCP_RTO_MAX);

		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
	}
}


/* Send a fin. The caller locks the socket for us. This cannot be
 * allowed to fail queueing a FIN frame under any circumstances.
 */
void tcp_send_fin(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
	int mss_now;

	/* Optimization, tack on the FIN if we have a queue of
	 * unsent frames. But be careful about outgoing SACKS
	 * and IP options.
	 */
	mss_now = tcp_current_mss(sk, 1);

	if (sk->sk_send_head != NULL) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
		TCP_SKB_CB(skb)->end_seq++;
		tp->write_seq++;
	} else {
		/* Socket is locked, keep trying until memory is available. */
		for (;;) {
			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
			if (skb)
				break;
			yield();
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(skb, MAX_TCP_HEADER);
		skb->csum = 0;
		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
		TCP_SKB_CB(skb)->sacked = 0;
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;

		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
		TCP_SKB_CB(skb)->seq = tp->write_seq;
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
		tcp_queue_skb(sk, skb);
	}
	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
}

/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue. This behavior is recommended
 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
 */
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	/* Send it off. */
	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	if (tcp_transmit_skb(sk, skb, 0, priority))
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
}

/* WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called.
 * If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
	struct sk_buff *skb;

	skb = skb_peek(&sk->sk_write_queue);
	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
		return -EFAULT;
	}
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
		if (skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb == NULL)
				return -ENOMEM;
			__skb_unlink(skb, &sk->sk_write_queue);
			skb_header_release(nskb);
			__skb_queue_head(&sk->sk_write_queue, nskb);
			sk_stream_free_skb(sk, skb);
			sk_charge_skb(sk, nskb);
			skb = nskb;
		}

		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
		TCP_ECN_send_synack(tcp_sk(sk), skb);
	}
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}

/*
 * Prepare a SYN-ACK.
 */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th;
	int tcp_header_size;
	struct sk_buff *skb;

	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_TCP_HEADER);

	skb->dst = dst_clone(dst);

	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			   /* SACK_PERM is in the place of NOP NOP of TS */
			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);

	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	TCP_ECN_make_synack(req, th);
	th->source = inet_sk(sk)->sport;
	th->dest = ireq->rmt_port;
	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;
	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
		__u8 rcv_wscale;
		/* Set this up on the first call only */
		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
		/* tcp_full_space because it is guaranteed to be the first packet */
		tcp_select_initial_window(tcp_full_space(sk),
					  dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
					  &req->rcv_wnd,
					  &req->window_clamp,
					  ireq->wscale_ok,
					  &rcv_wscale);
		ireq->rcv_wscale = rcv_wscale;
	}

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled.
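	 * The peer only learns our shift count from the wscale option carried
	 * in this segment, so the raw 16-bit value is advertised here and
	 * scaling starts with the first segment sent after the handshake.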
	 */
	th->window = htons(req->rcv_wnd);

	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
			      TCP_SKB_CB(skb)->when,
			      req->ts_recent);

	skb->csum = 0;
	th->doff = (tcp_header_size >> 2);
	TCP_INC_STATS(TCP_MIB_OUTSEGS);
	return skb;
}

/*
 * Do all connect socket setups that can be done AF independently.
 */
static void tcp_connect_init(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

	/* If the user gave us a TCP_MAXSEG value, record it as the MSS clamp. */
	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	tp->max_window = 0;
	tcp_mtup_init(sk);
	tcp_sync_mss(sk, dst_mtu(dst));

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(sk);

	tcp_select_initial_window(tcp_full_space(sk),
				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  &tp->rcv_wnd,
				  &tp->window_clamp,
				  sysctl_tcp_window_scaling,
				  &rcv_wscale);

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, tp->write_seq, 0);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->rcv_nxt = 0;
	tp->rcv_wup = 0;
	tp->copied_seq = 0;

	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	inet_csk(sk)->icsk_retransmits = 0;
	tcp_clear_retrans(tp);
}

/*
 * Build a SYN and send it off.
 */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;

	tcp_connect_init(sk);

	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(buff == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_TCP_HEADER);

	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
	TCP_ECN_send_syn(sk, tp, buff);
	TCP_SKB_CB(buff)->sacked = 0;
	skb_shinfo(buff)->gso_segs = 1;
	skb_shinfo(buff)->gso_size = 0;
	skb_shinfo(buff)->gso_type = 0;
	buff->csum = 0;
	tp->snd_nxt = tp->write_seq;
	TCP_SKB_CB(buff)->seq = tp->write_seq++;
	TCP_SKB_CB(buff)->end_seq = tp->write_seq;

	/* Send it off. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
	skb_header_release(buff);
	__skb_queue_tail(&sk->sk_write_queue, buff);
	sk_charge_skb(sk, buff);
	tp->packets_out += tcp_skb_pcount(buff);
	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);

	/* We change tp->snd_nxt after the tcp_transmit_skb() call
	 * in order to make this packet get counted in tcpOutSegs.
	 */
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;
	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer arrives.
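	 * icsk_rto was reset to TCP_TIMEOUT_INIT in tcp_connect_init(); the
	 * retransmit timer backs it off for every SYN that goes unanswered.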
	 */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}

/* Send out a delayed ack; the caller does the policy checking
 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound delayed ack.
		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
		 * directly.
		 */
		if (tp->srtt) {
			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use the new timeout only if there wasn't an older one set earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *buff;

		/* We are not putting this on the write queue, so
		 * tcp_transmit_skb() will set the ownership to this
		 * sock.
		 */
		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
		if (buff == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(buff, MAX_TCP_HEADER);
		buff->csum = 0;
		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
		TCP_SKB_CB(buff)->sacked = 0;
		skb_shinfo(buff)->gso_segs = 1;
		skb_shinfo(buff)->gso_size = 0;
		skb_shinfo(buff)->gso_type = 0;

		/* Send it off, this clears delayed acks for us. */
		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
		TCP_SKB_CB(buff)->when = tcp_time_stamp;
		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
	}
}

/* This routine sends a packet with an out-of-date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending a single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
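 * (everything the current window allows has already been transmitted,
 * so a forced data byte would have to sit beyond the advertised window).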
 *
 * Current solution: to send TWO zero-length segments in urgent mode:
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (skb == NULL)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = urgent;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	/* Use a previous sequence. This should cause the other
	 * end to send an ack. Don't queue or clone SKB, just
	 * send it.
	 */
	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}

int tcp_write_wakeup(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *skb;

		if ((skb = sk->sk_send_head) != NULL &&
		    before(TCP_SKB_CB(skb)->seq, tp->snd_una + tp->snd_wnd)) {
			int err;
			unsigned int mss = tcp_current_mss(sk, 0);
			unsigned int seg_size = tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq;

			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

			/* We are probing the opening of a window
			 * but the window size is != 0, so this must
			 * have been a result of SWS avoidance (sender).
			 */
			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
			    skb->len > mss) {
				seg_size = min(seg_size, mss);
				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
				if (tcp_fragment(sk, skb, seg_size, mss))
					return -1;
			} else if (!tcp_skb_pcount(skb))
				tcp_set_skb_tso_segs(sk, skb, mss);

			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
			TCP_SKB_CB(skb)->when = tcp_time_stamp;
			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
			if (!err) {
				update_send_head(sk, tp, skb);
			}
			return err;
		} else {
			if (tp->urg_mode &&
			    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
				tcp_xmit_probe_skb(sk, TCPCB_URG);
			return tcp_xmit_probe_skb(sk, 0);
		}
	}
	return -1;
}

/* A window probe timeout has occurred. If the window is not closed,
 * send a partial packet, else a zero probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !sk->sk_send_head) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If the packet was not sent due to local congestion,
		 * do not back off and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * Still use the accumulated backoff, though.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}

EXPORT_SYMBOL(tcp_connect);
EXPORT_SYMBOL(tcp_make_synack);
EXPORT_SYMBOL(tcp_simple_retransmit);
EXPORT_SYMBOL(tcp_sync_mss);
EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
EXPORT_SYMBOL(tcp_mtup_init);