/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

int sysctl_tcp_mtu_probing = 0;
int sysctl_tcp_base_mss = 512;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle = 1;

static void update_send_head(struct sock *sk, struct tcp_sock *tp,
			     struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; instead we advertise an MSS calculated from
 *    the first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders
	 * following RFC2414. Senders not following this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
					 __u32 tstamp)
{
	if (tp->rx_opt.tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->rx_opt.ts_recent);
	}
	if (tp->rx_opt.eff_sacks) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
						     TCPOLEN_SACK_PERBLOCK)));
		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->rx_opt.dsack) {
			tp->rx_opt.dsack = 0;
			tp->rx_opt.eff_sacks--;
		}
	}
}

/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
				  int offer_wscale, int wscale, __u32 tstamp,
				  __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if (sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if (sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	int tcp_header_size;
	struct tcphdr *th;
	int sysctl_flags;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->rtt_sample)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

	sysctl_flags = 0;
	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
		if (sysctl_tcp_timestamps) {
			tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
		}
		if (sysctl_tcp_window_scaling) {
			tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
			sysctl_flags |= SYSCTL_FLAG_WSCALE;
		}
		if (sysctl_tcp_sack) {
			sysctl_flags |= SYSCTL_FLAG_SACK;
			if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
				tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
		}
	} else if (unlikely(tp->rx_opt.eff_sacks)) {
		/* A SACK is 2 pad bytes, a 2 byte header, plus
		 * 2 32-bit sequence numbers for each SACK block.
		 */
		tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
				    (tp->rx_opt.eff_sacks *
				     TCPOLEN_SACK_PERBLOCK));
	}

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	th = (struct tcphdr *) skb_push(skb, tcp_header_size);
	skb->h.th = th;
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th->source = inet->sport;
	th->dest = inet->dport;
	th->seq = htonl(tcb->seq);
	th->ack_seq = htonl(tp->rcv_nxt);
	*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
				     tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window = htons(tp->rcv_wnd);
	} else {
		th->window = htons(tcp_select_window(sk));
	}
	th->check = 0;
	th->urg_ptr = 0;

	if (unlikely(tp->urg_mode &&
		     between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF))) {
		th->urg_ptr = htons(tp->snd_up-tcb->seq);
		th->urg = 1;
	}

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		tcp_syn_build_options((__u32 *)(th + 1),
				      tcp_advertise_mss(sk),
				      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
				      (sysctl_flags & SYSCTL_FLAG_SACK),
				      (sysctl_flags & SYSCTL_FLAG_WSCALE),
				      tp->rx_opt.rcv_wscale,
				      tcb->when,
				      tp->rx_opt.ts_recent);
	} else {
		tcp_build_and_update_options((__u32 *)(th + 1),
					     tp, tcb->when);
		TCP_ECN_send(sk, tp, skb, tcp_header_size);
	}

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	TCP_INC_STATS(TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	/* NET_XMIT_CN is special. It does not guarantee that this packet
	 * is lost. It tells us that the device is about to start dropping
	 * packets, or already drops some packets of the same priority,
	 * and invites us to send less aggressively.
	 */
	return err == NET_XMIT_CN ? 0 : err;

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now ||
	    !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u16 flags;

	BUG_ON(len > skb->len);

	clear_all_retrans_hints(tp);
	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk_charge_skb(sk, buff);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' field of
	 * skbs which it has never sent before.		--ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}

		if (diff > 0) {
			/* Adjust Reno SACK estimate. */
			if (!tp->rx_opt.sack_ok) {
				tp->sacked_out -= diff;
				if ((int)tp->sacked_out < 0)
					tp->sacked_out = 0;
				tcp_sync_left_out(tp);
			}

			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes snd mss to the current pmtu/exthdr set.

   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
   NOT account for TCP options, but includes only the bare TCP header.

   tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
   It is the minimum of user_mss and the mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is the current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account the current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function.		--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
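
/* Worked example of the arithmetic above (illustrative values, assuming a
 * plain IPv4 path with a 1500 byte PMTU, no extension headers and the
 * timestamp option negotiated): tcp_mtu_to_mss() yields
 * 1500 - 20 (IP header) - 20 (TCP header) = 1460 bytes of clamped MSS,
 * and subtracting the 12 bytes of aligned timestamp options leaves an
 * effective sending MSS of 1448.  tcp_mss_to_mtu() inverts the same sum:
 * 1448 + 32 (full TCP header) + 0 + 20 = 1500.
 */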

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed &&
	    (sk->sk_route_caps & NETIF_F_TSO) &&
	    !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = (65535 -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fully fed. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     skb_shinfo(skb)->tso_size != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if a packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
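
/* For illustration (assumed numbers): with an mss_now of 1460, a 500 byte
 * skb is held back by tcp_nagle_test() below as long as TCP_NODELAY is not
 * set, it carries no FIN or urgent data, and an earlier small segment is
 * still unacknowledged; once nothing is left in flight, or the segment is
 * filled out to a full MSS, it is sent immediately.
 */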

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).  */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk_charge_skb(sk, buff);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= 65536)
		return 0;

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer.  */
	return 1;
}
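
/* For illustration (assumed numbers): with the default tcp_tso_win_divisor
 * of 3, a flow with a 60000 byte send window, a congestion window of 40
 * segments and an mss_cache of 1460 gets
 * chunk = min(60000, 40 * 1460) / 3 = 19466, so a TSO skb that can already
 * send at least that many bytes goes out now instead of being deferred in
 * the hope of building a larger frame.
 */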

/* Create a new MTU probe if we are ready.
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	unsigned int pif;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.eff_sacks)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk, 0);
	probe_size = 2*tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	len = 0;
	if ((skb = sk->sk_send_head) == NULL)
		return -1;
	while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb))
		skb = skb->next;
	if (len < probe_size)
		return -1;

	/* Receive window check. */
	if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) {
		if (tp->snd_wnd < probe_size)
			return -1;
		else
			return 0;
	}

	/* Do we need to wait to drain cwnd? */
	pif = tcp_packets_in_flight(tp);
	if (pif + 2 > tp->snd_cwnd) {
		/* With no packets in flight, don't stall. */
		if (pif == 0)
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk_charge_skb(sk, nskb);

	skb = sk->sk_send_head;
	__skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue);
	sk->sk_send_head = nskb;

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		nskb->ip_summed = CHECKSUM_HW;

	len = 0;
	while (len < probe_size) {
		next = skb->next;

		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
					skb_put(nskb, copy), copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			__skb_unlink(skb, &sk->sk_write_queue);
			sk_stream_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_HW)
					skb->csum = csum_partial(skb->data, skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;
		skb = next;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		update_send_head(sk, tp, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}


/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;

	/* Do MTU probing. */
	if ((result = tcp_mtu_probe(sk)) == 0) {
		return 0;
	} else if (result > 0) {
		sent_pkts = 1;
	}

	while ((skb = sk->sk_send_head)) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, tp, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, tp, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk, tp);
		return 0;
	}
	return !tp->packets_out && sk->sk_send_head;
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
			       unsigned int cur_mss, int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
}

/* Send the _single_ skb sitting at the send head. This function requires
 * a true push of pending frames to set up the probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = sk->sk_send_head;
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
			update_send_head(sk, tp, skb);
			tcp_cwnd_validate(sk, tp);
			return;
		}
	}
}

/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RECV.NEXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria. The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *	If the free space is less than the 1/4 of the maximum
 *	space available and the free space is less than 1/2 mss,
 *	then set the window to 0.
 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *	Otherwise, just prevent the window from shrinking
 *	and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue. It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account.
 */
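
/* For illustration (assumed numbers): without window scaling, an mss of
 * 1460, 10000 bytes of free space and a currently advertised window of
 * 4096 bytes round the offer down to (10000 / 1460) * 1460 = 8760 bytes;
 * had the old offer already been within one mss of the free space, it
 * would simply have been kept.
 */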
u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data.  Previous versions used mss_clamp
	 * here.  I don't know if the value based on our guesses
	 * of peer's MSS is better for the performance.  It's more correct
	 * but may be worse for the performance because of rcv_mss
	 * fluctuations.  --SAW  1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	if (mss > full_space)
		mss = full_space;

	if (free_space < full_space/2) {
		icsk->icsk_ack.quick = 0;

		if (tcp_memory_pressure)
			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Important case: prevent zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space/mss)*mss;
	}

	return window;
}

/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = skb->next;

	/* The first test we must make is that neither of these two
	 * SKB's are still referenced by someone else.
	 */
	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
		int skb_size = skb->len, next_skb_size = next_skb->len;
		u16 flags = TCP_SKB_CB(skb)->flags;

		/* Also punt if next skb has been SACK'd. */
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
			return;

		/* Next skb is out of window. */
		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
			return;

		/* Punt if not enough space exists in the first SKB for
		 * the data in the second, or the total combined payload
		 * would exceed the MSS.
		 */
		if ((next_skb_size > skb_tailroom(skb)) ||
		    ((skb_size + next_skb_size) > mss_now))
			return;

		BUG_ON(tcp_skb_pcount(skb) != 1 ||
		       tcp_skb_pcount(next_skb) != 1);

		/* changing transmit queue under us so clear hints */
		clear_all_retrans_hints(tp);

		/* Ok.  We will be able to collapse the packet. */
		__skb_unlink(next_skb, &sk->sk_write_queue);

		memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);

		if (next_skb->ip_summed == CHECKSUM_HW)
			skb->ip_summed = CHECKSUM_HW;

		if (skb->ip_summed != CHECKSUM_HW)
			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

		/* Update sequence range on original skb. */
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

		/* Merge over control information. */
		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
		TCP_SKB_CB(skb)->flags = flags;

		/* All done, get rid of second SKB and account for it so
		 * packet counting does not break.
		 */
		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
			tp->retrans_out -= tcp_skb_pcount(next_skb);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) {
			tp->lost_out -= tcp_skb_pcount(next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}
		/* Reno case is special. Sigh... */
		if (!tp->rx_opt.sack_ok && tp->sacked_out) {
			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
			tp->left_out -= tcp_skb_pcount(next_skb);
		}

		/* Not quite right: it can be > snd.fack, but
		 * it is better to underestimate fackets.
		 */
		tcp_dec_pcount_approx(&tp->fackets_out, next_skb);
		tcp_packets_out_dec(tp, next_skb);
		sk_stream_free_skb(sk, next_skb);
	}
}

/* Do a simple retransmit without using the backoff mechanisms in
 * tcp_timer. This is used for path mtu discovery.
 * The socket is already locked here.
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk, 0);
	int lost = 0;

	sk_stream_for_retrans_queue(skb, sk) {
		if (skb->len > mss &&
		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				lost = 1;
			}
		}
	}

	clear_all_retrans_hints(tp);

	if (!lost)
		return;

	tcp_sync_left_out(tp);

	/* Don't muck with the congestion window here.
	 * Reason is that we do not increase amount of _data_
	 * in network, but units changed and effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}

/* This retransmits one SKB.  Policy decisions and retransmit queue
 * state updates are done by the caller.  Returns non-zero if an
 * error occurred which prevented the send.
 */
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int cur_mss = tcp_current_mss(sk, 0);
	int err;

	/* Inconclusive MTU probe */
	if (icsk->icsk_mtup.probe_size) {
		icsk->icsk_mtup.probe_size = 0;
	}

	/* Do not send more than we queued. 1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (atomic_read(&sk->sk_wmem_alloc) >
	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
		return -EAGAIN;

	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	if (skb->len > cur_mss) {
		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
	}

	/* Collapse two adjacent packets if worthwhile and we can. */
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
	    (skb->len < (cur_mss >> 1)) &&
	    (skb->next != sk->sk_send_head) &&
	    (skb->next != (struct sk_buff *)&sk->sk_write_queue) &&
	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) &&
	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) &&
	    (sysctl_tcp_retrans_collapse != 0))
		tcp_retrans_try_collapse(sk, skb, cur_mss);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* Some Solaris stacks overoptimize and ignore the FIN on a
	 * retransmit when old data is attached.  So strip it off
	 * since it is cheap to do so and saves bytes on the network.
	 */
	if (skb->len > 0 &&
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
			skb_shinfo(skb)->tso_segs = 1;
			skb_shinfo(skb)->tso_size = 0;
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
		}
	}

	/* Make a copy, if the first transmission SKB clone we made
	 * is still in somebody's hands, else make a clone.
	 */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;

	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);

	if (err == 0) {
		/* Update global TCP statistics. */
		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);

		tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
			if (net_ratelimit())
				printk(KERN_DEBUG "retrans_out leaked.\n");
		}
#endif
		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
		tp->retrans_out += tcp_skb_pcount(skb);

		/* Save stamp of the first retransmit. */
		if (!tp->retrans_stamp)
			tp->retrans_stamp = TCP_SKB_CB(skb)->when;

		tp->undo_retrans++;

		/* snd_nxt is stored to detect loss of retransmitted segment,
		 * see tcp_input.c tcp_sacktag_write_queue().
		 */
		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
	}
	return err;
}

/* This gets called after a retransmit timeout, and the initially
 * retransmitted data is acknowledged.  It tries to continue
 * resending the rest of the retransmit queue, until either
 * we've sent it all or the congestion window limit is reached.
 * If doing SACK, the first ACK which comes back for a timeout
 * based retransmit packet might feed us FACK information again.
 * If so, we use it to avoid unnecessary retransmissions.
 */
void tcp_xmit_retransmit_queue(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int packet_cnt;

	if (tp->retransmit_skb_hint) {
		skb = tp->retransmit_skb_hint;
		packet_cnt = tp->retransmit_cnt_hint;
	} else {
		skb = sk->sk_write_queue.next;
		packet_cnt = 0;
	}

	/* First pass: retransmit lost packets. */
	if (tp->lost_out) {
		sk_stream_for_retrans_queue_from(skb, sk) {
			__u8 sacked = TCP_SKB_CB(skb)->sacked;

			/* we could do better than to assign each time */
			tp->retransmit_skb_hint = skb;
			tp->retransmit_cnt_hint = packet_cnt;

			/* Assume this retransmit will generate
			 * only one packet for congestion window
			 * calculation purposes.  This works because
			 * tcp_retransmit_skb() will chop up the
			 * packet to be MSS sized and all the
			 * packet counting works out.
			 */
			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
				return;

			if (sacked & TCPCB_LOST) {
				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
					if (tcp_retransmit_skb(sk, skb)) {
						tp->retransmit_skb_hint = NULL;
						return;
					}
					if (icsk->icsk_ca_state != TCP_CA_Loss)
						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
					else
						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);

					if (skb ==
					    skb_peek(&sk->sk_write_queue))
						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
									  inet_csk(sk)->icsk_rto,
									  TCP_RTO_MAX);
				}

				packet_cnt += tcp_skb_pcount(skb);
				if (packet_cnt >= tp->lost_out)
					break;
			}
		}
	}

	/* OK, demanded retransmission is finished. */

	/* Forward retransmissions are possible only during Recovery. */
	if (icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	/* No forward retransmissions in Reno are possible. */
	if (!tp->rx_opt.sack_ok)
		return;

	/* Yeah, we have to make a difficult choice between forward transmission
	 * and retransmission... Both ways have their merits...
	 *
	 * For now we do not retransmit anything, while we have some new
	 * segments to send.
	 */

	if (tcp_may_send_now(sk, tp))
		return;

	if (tp->forward_skb_hint) {
		skb = tp->forward_skb_hint;
		packet_cnt = tp->forward_cnt_hint;
	} else {
		skb = sk->sk_write_queue.next;
		packet_cnt = 0;
	}

	sk_stream_for_retrans_queue_from(skb, sk) {
		tp->forward_cnt_hint = packet_cnt;
		tp->forward_skb_hint = skb;

		/* Similar to the retransmit loop above we
		 * can pretend that the retransmitted SKB
		 * we send out here will be composed of one
		 * real MSS sized packet because tcp_retransmit_skb()
		 * will fragment it if necessary.
		 */
/* Send a fin.  The caller locks the socket for us.  This cannot be
 * allowed to fail queueing a FIN frame under any circumstances.
 */
void tcp_send_fin(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
	int mss_now;

	/* Optimization, tack on the FIN if we have a queue of
	 * unsent frames.  But be careful about outgoing SACKS
	 * and IP options.
	 */
	mss_now = tcp_current_mss(sk, 1);

	if (sk->sk_send_head != NULL) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
		TCP_SKB_CB(skb)->end_seq++;
		tp->write_seq++;
	} else {
		/* Socket is locked, keep trying until memory is available. */
		for (;;) {
			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
			if (skb)
				break;
			yield();
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(skb, MAX_TCP_HEADER);
		skb->csum = 0;
		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
		TCP_SKB_CB(skb)->sacked = 0;
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;

		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
		TCP_SKB_CB(skb)->seq = tp->write_seq;
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
		tcp_queue_skb(sk, skb);
	}
	__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF);
}

/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue.  This behavior is recommended
 * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
 */
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->tso_segs = 1;
	skb_shinfo(skb)->tso_size = 0;

	/* Send it off. */
	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp);
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	if (tcp_transmit_skb(sk, skb, 0, priority))
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
}
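
/*
 * Sequence number accounting for the pure control segments built above,
 * shown as a worked example (the concrete numbers are arbitrary and only
 * illustrative).  A FIN occupies one byte of sequence space, while a bare
 * RST/ACK occupies none:
 *
 *	suppose tp->write_seq == 1000 when tcp_send_fin() must queue a
 *	new FIN skb:
 *		TCP_SKB_CB(skb)->seq     = 1000;
 *		TCP_SKB_CB(skb)->end_seq = 1001;	(FIN "eats" one byte)
 *
 *	whereas tcp_send_active_reset() builds a zero-length segment:
 *		TCP_SKB_CB(skb)->seq     = tcp_acceptable_seq(sk, tp);
 *		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
 */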
/* WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called.  If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
	struct sk_buff *skb;

	skb = skb_peek(&sk->sk_write_queue);
	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
		return -EFAULT;
	}
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
		if (skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb == NULL)
				return -ENOMEM;
			__skb_unlink(skb, &sk->sk_write_queue);
			skb_header_release(nskb);
			__skb_queue_head(&sk->sk_write_queue, nskb);
			sk_stream_free_skb(sk, skb);
			sk_charge_skb(sk, nskb);
			skb = nskb;
		}

		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
		TCP_ECN_send_synack(tcp_sk(sk), skb);
	}
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}

/*
 * Prepare a SYN-ACK.
 */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th;
	int tcp_header_size;
	struct sk_buff *skb;

	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_TCP_HEADER);

	skb->dst = dst_clone(dst);

	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			   /* SACK_PERM is in the place of NOP NOP of TS */
			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
	skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);

	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	if (dst->dev->features & NETIF_F_TSO)
		ireq->ecn_ok = 0;
	TCP_ECN_make_synack(req, th);
	th->source = inet_sk(sk)->sport;
	th->dest = ireq->rmt_port;
	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->tso_segs = 1;
	skb_shinfo(skb)->tso_size = 0;
	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
		__u8 rcv_wscale;
		/* Set this up on the first call only */
		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
		/* tcp_full_space because it is guaranteed to be the first packet */
		tcp_select_initial_window(tcp_full_space(sk),
			dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
			&req->rcv_wnd,
			&req->window_clamp,
			ireq->wscale_ok,
			&rcv_wscale);
		ireq->rcv_wscale = rcv_wscale;
	}

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
	th->window = htons(req->rcv_wnd);

	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
			      TCP_SKB_CB(skb)->when,
			      req->ts_recent);

	skb->csum = 0;
	th->doff = (tcp_header_size >> 2);
	TCP_INC_STATS(TCP_MIB_OUTSEGS);
	return skb;
}
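
/*
 * Worked example of the tcp_header_size computation in tcp_make_synack(),
 * assuming the usual aligned option lengths (TCPOLEN_MSS == 4,
 * TCPOLEN_TSTAMP_ALIGNED == 12, TCPOLEN_WSCALE_ALIGNED == 4,
 * TCPOLEN_SACKPERM_ALIGNED == 4) and sizeof(struct tcphdr) == 20:
 *
 *	timestamps + wscale + SACK-permitted (SACK_PERM rides inside the
 *	timestamp option's NOP NOP, adding nothing):  20 + 4 + 12 + 4 = 40
 *	wscale + SACK-permitted, no timestamps:       20 + 4 + 4 + 4  = 32
 *	MSS option only:                              20 + 4          = 24
 *
 * th->doff then carries this length in 32-bit words, e.g. 40 >> 2 == 10.
 */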
/*
 * Do all connect socket setups that can be done AF independent.
 */
static void tcp_connect_init(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

	/* If the user gave us TCP_MAXSEG, record it as the MSS clamp. */
	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	tp->max_window = 0;
	tcp_mtup_init(sk);
	tcp_sync_mss(sk, dst_mtu(dst));

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(sk);

	tcp_select_initial_window(tcp_full_space(sk),
				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  &tp->rcv_wnd,
				  &tp->window_clamp,
				  sysctl_tcp_window_scaling,
				  &rcv_wscale);

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, tp->write_seq, 0);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->rcv_nxt = 0;
	tp->rcv_wup = 0;
	tp->copied_seq = 0;

	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	inet_csk(sk)->icsk_retransmits = 0;
	tcp_clear_retrans(tp);
}

/*
 * Build a SYN and send it off.
 */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;

	tcp_connect_init(sk);

	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(buff == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_TCP_HEADER);

	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
	TCP_ECN_send_syn(sk, tp, buff);
	TCP_SKB_CB(buff)->sacked = 0;
	skb_shinfo(buff)->tso_segs = 1;
	skb_shinfo(buff)->tso_size = 0;
	buff->csum = 0;
	TCP_SKB_CB(buff)->seq = tp->write_seq++;
	TCP_SKB_CB(buff)->end_seq = tp->write_seq;
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;

	/* Send it off. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
	skb_header_release(buff);
	__skb_queue_tail(&sk->sk_write_queue, buff);
	sk_charge_skb(sk, buff);
	tp->packets_out += tcp_skb_pcount(buff);
	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}
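
/*
 * Worked example for the mss argument that tcp_connect_init() passes to
 * tcp_select_initial_window(): when the conditional subtraction in that
 * call applies, the advertised MSS is reduced by the per-segment option
 * overhead we already know about, i.e. tp->tcp_header_len minus the bare
 * TCP header.  Assuming an Ethernet path with tp->advmss == 1460 and
 * timestamps enabled (so the difference is TCPOLEN_TSTAMP_ALIGNED == 12),
 * the value handed down is
 *
 *	1460 - 12 = 1448 bytes
 *
 * which matches the payload actually expected in each incoming segment.
 * The concrete numbers here are only illustrative, not taken from this
 * file.
 */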
/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound the delayed ack.
		 * Do not use inet_csk(sk)->icsk_rto here, use the results of rtt
		 * measurements directly.
		 */
		if (tp->srtt) {
			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use the new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If the delack timer was blocked or is about to expire,
		 * send the ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *buff;

		/* We are not putting this on the write queue, so
		 * tcp_transmit_skb() will set the ownership to this
		 * sock.
		 */
		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
		if (buff == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(buff, MAX_TCP_HEADER);
		buff->csum = 0;
		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
		TCP_SKB_CB(buff)->sacked = 0;
		skb_shinfo(buff)->tso_segs = 1;
		skb_shinfo(buff)->tso_size = 0;

		/* Send it off, this clears delayed acks for us. */
		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp);
		TCP_SKB_CB(buff)->when = tcp_time_stamp;
		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
	}
}
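
/*
 * Worked example of the delayed-ACK timeout chosen above, assuming
 * HZ == 1000 and the usual TCP_DELACK_MIN of HZ/25 (40 ms); the numbers
 * are illustrative only:
 *
 *	ato = 120 ms, pingpong off   ->  max_ato starts at HZ/2 = 500 ms
 *	srtt >> 3 = 50 ms            ->  rtt = max(50, 40) = 50 ms,
 *	                                 so max_ato becomes 50 ms
 *	ato = min(120, 50) = 50 ms   ->  timeout = jiffies + 50
 *
 * If a delack timer is already armed and either was blocked or would
 * fire within ato >> 2 = 12 ms anyway, tcp_send_ack() is called
 * immediately instead of rearming the timer.
 */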
/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending a single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: send TWO zero-length segments in urgent mode:
 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, another
 * out-of-date with SND.UNA-1 to probe the window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (skb == NULL)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = urgent;
	skb_shinfo(skb)->tso_segs = 1;
	skb_shinfo(skb)->tso_size = 0;

	/* Use a previous sequence.  This should cause the other
	 * end to send an ack.  Don't queue or clone the SKB, just
	 * send it.
	 */
	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}

int tcp_write_wakeup(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *skb;

		if ((skb = sk->sk_send_head) != NULL &&
		    before(TCP_SKB_CB(skb)->seq, tp->snd_una + tp->snd_wnd)) {
			int err;
			unsigned int mss = tcp_current_mss(sk, 0);
			unsigned int seg_size = tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq;

			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

			/* We are probing the opening of a window
			 * but the window size is != 0;
			 * this must have been a result of SWS avoidance (sender).
			 */
			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
			    skb->len > mss) {
				seg_size = min(seg_size, mss);
				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
				if (tcp_fragment(sk, skb, seg_size, mss))
					return -1;
			} else if (!tcp_skb_pcount(skb))
				tcp_set_skb_tso_segs(sk, skb, mss);

			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
			TCP_SKB_CB(skb)->when = tcp_time_stamp;
			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
			if (!err) {
				update_send_head(sk, tp, skb);
			}
			return err;
		} else {
			if (tp->urg_mode &&
			    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
				tcp_xmit_probe_skb(sk, TCPCB_URG);
			return tcp_xmit_probe_skb(sk, 0);
		}
	}
	return -1;
}

/* A window probe timeout has occurred.  If the window is not closed,
 * send a partial packet, else a zero probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !sk->sk_send_head) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If the packet was not sent due to local congestion,
		 * do not back off and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * But keep using the accumulated backoff.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}

EXPORT_SYMBOL(tcp_connect);
EXPORT_SYMBOL(tcp_make_synack);
EXPORT_SYMBOL(tcp_simple_retransmit);
EXPORT_SYMBOL(tcp_sync_mss);
EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
EXPORT_SYMBOL(tcp_mtup_init);
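
/*
 * Worked example of the zero-window probe backoff in tcp_send_probe0(),
 * assuming icsk_rto == 200 ms and the usual TCP_RTO_MAX of 120 s; the
 * numbers are illustrative, not taken from this file.  Each failed probe
 * (err <= 0) first increments icsk_backoff (bounded by
 * sysctl_tcp_retries2) and then arms the timer at
 *
 *	min(icsk_rto << icsk_backoff, TCP_RTO_MAX)
 *
 * giving roughly 400 ms, 800 ms, 1.6 s, 3.2 s, ... capped at 120 s.
 * If the probe failed only because of local congestion (err > 0), the
 * same shifted value is used but capped at the much shorter
 * TCP_RESOURCE_PROBE_INTERVAL, and the backoff counter is left alone.
 */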