/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can already be invalid
 * as well. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
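
/* Worked example (illustrative): if snd_nxt is 1000 but the peer shrank
 * its window so that tcp_wnd_end() is 800, tcp_acceptable_seq() above
 * returns 800, i.e. the highest sequence the receiver has promised to
 * accept; otherwise it simply returns snd_nxt.
 */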

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    a large MSS.
 * 4. We do not do (3); we advertise an MSS calculated from the first-hop
 *    device MTU, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;
	const struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If this is a reply sent within ATO of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}


u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice of TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
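
/* Worked example (illustrative, assuming TCP_INIT_CWND == 10): the
 * default initial receive window starts at 20 segments. For a 1460-byte
 * MSS that is left as 20; for a 9000-byte MSS it becomes
 * max((1460 * 20) / 9000, 2) = 3 segments, keeping roughly the same
 * number of bytes on offer.
 */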

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
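
	/* Note (illustrative): ALIGN() above rounds cur_win up to the scale
	 * granularity (1 << rcv_wscale). E.g. cur_win = 10001 with
	 * rcv_wscale = 7 is advertised as 79 (79 << 7 = 10112), the smallest
	 * encodable value that does not shrink the window.
	 */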
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
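
/* Layout sketch (illustrative): OPTION_* values are bit positions in
 * tcp_out_options.options, so a SYN carrying SACK_PERM, timestamps and
 * window scaling has options == OPTION_SACK_ADVERTISE | OPTION_TS |
 * OPTION_WSCALE == 0x1 | 0x2 | 0x8 == 0xb.
 */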

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}
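
/* Wire sketch (illustrative): a common SYN built by the writer above
 * packs MSS (4 bytes), SACK_PERM folded into the timestamp group
 * (12 bytes) and NOP + window scale (4 bytes): 20 option bytes total,
 * i.e. a 40-byte TCP header with doff = 10.
 */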

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes that will appear
	 * in normal data packets, should timestamps be used, must be
	 * counted in the MSS we advertise. But we subtract them from
	 * tp->mss_cache so that calculations in tcp_sendmsg are simpler,
	 * etc. So account for this fact here if necessary. If we don't do
	 * this correctly, as a receiver we won't recognize data packets
	 * as being full sized when we should, and thus we won't abide by
	 * the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of
	 * those going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       struct tcp_md5sig_key **md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
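
/* Space check (illustrative): MAX_TCP_OPTION_SPACE is 40 bytes. MD5
 * (20 bytes aligned) plus timestamps (12) would leave 8, less than the
 * 12 needed for even one aligned SACK block; that is why tstamp_ok is
 * cleared above when both MD5 and SACK are in play.
 */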

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}


/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is skb->truesize subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from the skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
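
/* Flow sketch (illustrative): when a driver frees a transmitted skb,
 * tcp_wfree() below queues the throttled socket on this cpu's
 * tsq_tasklet list; tcp_tasklet_func() then calls the handler above to
 * push more packets, so only a small amount of data per flow sits in
 * the qdisc/device queues.
 */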
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on the socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to the tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}
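
/* Refcount sketch (illustrative): the atomic_sub(skb->truesize - 1, ...)
 * in tcp_wfree() releases the skb's wmem charge except for one byte;
 * that byte acts as the reference keeping the socket alive until
 * sk_free() in tcp_tasklet_func() drops it.
 */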

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		const struct sk_buff *fclone = skb + 1;

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* if no packet is in the qdisc/device queue, then allow XPS to
	 * select another queue.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = tcp_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);
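
	/* Packing note (illustrative): the halfword at byte offset 12 holds
	 * the data offset (header length in 32-bit words) in its top 4 bits,
	 * followed by the flag bits. E.g. a 40-byte header with only ACK set
	 * is htons((10 << 12) | TCPHDR_ACK).
	 */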
	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked; do not forget
 * tcp_push_pending_frames, otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}
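
/* TSO example (illustrative): a 7300-byte skb with mss_now = 1460 gets
 * gso_segs = DIV_ROUND_UP(7300, 1460) = 5 and gso_size = 1460; the NIC
 * (or software GSO) later slices it into five on-wire segments.
 */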

/* When a modification to fackets_out becomes necessary, we need to check
 * whether the skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix the counters.
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;
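
	/* Split example (illustrative): fragmenting an skb covering
	 * 1000..3920 at len = 1460 leaves the original covering 1000..2460
	 * and buff covering 2460..3920, with PSH/FIN carried by buff so
	 * they still mark the end of the original range.
	 */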
	/* Looks stupid, but our code really uses the 'when' of skbs
	 * which it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	__pskb_trim_head(skb, len);

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of the tso factor. */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));

	return 0;
}
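
/* Trim example (illustrative): when 500 bytes of an skb are newly
 * ACKed, tcp_trim_head() above advances seq by 500 and uncharges 500
 * bytes of write memory, so a later retransmit of the remainder starts
 * exactly at snd_una.
 */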

/* Calculate MSS, not accounting for any TCP options. */
static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
	}

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for a full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;
	return mss_now;
}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	/* Subtract TCP options size, not including SACKs */
	return __tcp_mtu_to_mss(sk, pmtu) -
	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}

/* Inverse of the above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mtu += icsk->icsk_af_ops->net_frag_header_len;
	}
	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);
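
/* MTU math (illustrative): for IPv4 with no extension headers,
 * __tcp_mtu_to_mss(sk, 1500) = 1500 - 20 - 20 = 1460, and
 * tcp_mss_to_mtu() inverts it. With timestamps negotiated,
 * tcp_mtu_to_mss() subtracts the 12 option bytes as well, giving 1448.
 */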

/* This function synchronizes the snd mss to the current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
 * NOT account for TCP options, but includes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
 * It is the minimum of user_mss and the mss received with the SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated taking into account
 * the current pmtu, but never exceeds tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that the advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function.		--ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned int header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}
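
/* Note (illustrative): "application limited" above means the socket had
 * cwnd to spare but nothing to send; if that persists for an RTO,
 * tcp_cwnd_application_limited() is expected to shrink the unused part
 * of cwnd per RFC 2861 rather than let a stale, inflated cwnd survive.
 */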

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request the caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting the write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always an MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
					unsigned int mss_now, unsigned int max_segs)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, max_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	max_len = mss_now * max_segs;

	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
		return max_len;

	needed = min(skb->len, window);

	if (max_len <= needed)
		return max_len;

	return needed - needed % mss_now;
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
					 const struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

/* Minshall's variant of the Nagle send check. */
static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return false if the packet can be sent now without violating Nagle's
 * rules:
 * 1. It is full sized.
 * 2. Or it contains a FIN. (already checked by the caller)
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline bool tcp_nagle_check(const struct tcp_sock *tp,
				   const struct sk_buff *skb,
				   unsigned int mss_now, int nonagle)
{
	return skb->len < mss_now &&
	       ((nonagle & TCP_NAGLE_CORK) ||
		(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}

/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
				  unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames which sit in the middle of
	 * the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return true;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
		return true;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return true;

	return false;
}
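
/* Nagle example (illustrative): with nonagle == 0, a 100-byte tail skb
 * is held back by tcp_nagle_check() while an earlier small packet is
 * still unacked (Minshall's rule). With TCP_NODELAY the !nonagle term
 * is false, so only TCP_CORK can delay it.
 */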

/* Does at least the first segment of SKB fit into the send window? */
static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
			     const struct sk_buff *skb,
			     unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
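
/* Fast path note (illustrative): tso_fragment() above is only safe
 * because the skb is unsent and fully paged, so skb_split() just moves
 * page fragments; a partly linear skb falls back to tcp_fragment().
 */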

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;
	int win_divisor;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks. */
	if (tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* From the in_flight test above, we know that cwnd > in_flight. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
			   tp->xmit_size_goal_segs * tp->mss_cache))
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
		goto send_now;

	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
	if (win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer.
	 * Do not rearm the timer if already set to not break TCP ACK clocking.
	 */
	if (!tp->tso_deferred)
		tp->tso_deferred = 1 | (jiffies << 1);

	return true;

send_now:
	tp->tso_deferred = 0;
	return false;
}

/* Create a new MTU probe if we are ready.
 * MTU probing regularly attempts to increase the path MTU by
 * deliberately sending larger packets.  This discovers routing
 * changes resulting in larger path MTUs.
 *
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off)
	 */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;
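
	/* Sizing example (illustrative): with mss_cache = 1460 and
	 * reordering = 3, the computation below gives probe_size = 2920 and
	 * size_needed = 2920 + 4 * 1460 = 8760 bytes, so probing waits until
	 * the queue and windows can cover a probe plus enough trailing data
	 * to detect its loss.
	 */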
	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away.
			 */
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
						       ~(TCPHDR_FIN|TCPHDR_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit().
	 */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets.
		 */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}
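
/* TSQ budget (illustrative): in tcp_write_xmit() below, the per-flow
 * allowance of bytes sitting in qdisc/device queues is
 * max(skb->truesize, sk->sk_pacing_rate >> 10), i.e. roughly one packet
 * or ~1 ms worth of data at the current pacing rate, whichever is
 * larger.
 */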
Temporarily ignore 1813 * cwnd limit to force at most one packet out when push_one == 2. 1814 1815 * Returns true, if no segments are in flight and we have queued segments, 1816 * but cannot send anything now because of SWS or another problem. 1817 */ 1818 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1819 int push_one, gfp_t gfp) 1820 { 1821 struct tcp_sock *tp = tcp_sk(sk); 1822 struct sk_buff *skb; 1823 unsigned int tso_segs, sent_pkts; 1824 int cwnd_quota; 1825 int result; 1826 1827 sent_pkts = 0; 1828 1829 if (!push_one) { 1830 /* Do MTU probing. */ 1831 result = tcp_mtu_probe(sk); 1832 if (!result) { 1833 return false; 1834 } else if (result > 0) { 1835 sent_pkts = 1; 1836 } 1837 } 1838 1839 while ((skb = tcp_send_head(sk))) { 1840 unsigned int limit; 1841 1842 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1843 BUG_ON(!tso_segs); 1844 1845 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) 1846 goto repair; /* Skip network transmission */ 1847 1848 cwnd_quota = tcp_cwnd_test(tp, skb); 1849 if (!cwnd_quota) { 1850 if (push_one == 2) 1851 /* Force out a loss probe pkt. */ 1852 cwnd_quota = 1; 1853 else 1854 break; 1855 } 1856 1857 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1858 break; 1859 1860 if (tso_segs == 1) { 1861 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1862 (tcp_skb_is_last(sk, skb) ? 1863 nonagle : TCP_NAGLE_PUSH)))) 1864 break; 1865 } else { 1866 if (!push_one && tcp_tso_should_defer(sk, skb)) 1867 break; 1868 } 1869 1870 /* TCP Small Queues : 1871 * Control number of packets in qdisc/devices to two packets / or ~1 ms. 1872 * This allows for : 1873 * - better RTT estimation and ACK scheduling 1874 * - faster recovery 1875 * - high rates 1876 */ 1877 limit = max(skb->truesize, sk->sk_pacing_rate >> 10); 1878 1879 if (atomic_read(&sk->sk_wmem_alloc) > limit) { 1880 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 1881 break; 1882 } 1883 1884 limit = mss_now; 1885 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1886 limit = tcp_mss_split_point(sk, skb, mss_now, 1887 min_t(unsigned int, 1888 cwnd_quota, 1889 sk->sk_gso_max_segs)); 1890 1891 if (skb->len > limit && 1892 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 1893 break; 1894 1895 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1896 1897 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 1898 break; 1899 1900 repair: 1901 /* Advance the send_head. This one is sent out. 1902 * This call will increment packets_out. 1903 */ 1904 tcp_event_new_data_sent(sk, skb); 1905 1906 tcp_minshall_update(tp, mss_now, skb); 1907 sent_pkts += tcp_skb_pcount(skb); 1908 1909 if (push_one) 1910 break; 1911 } 1912 1913 if (likely(sent_pkts)) { 1914 if (tcp_in_cwnd_reduction(sk)) 1915 tp->prr_out += sent_pkts; 1916 1917 /* Send one loss probe per tail loss episode. */ 1918 if (push_one != 2) 1919 tcp_schedule_loss_probe(sk); 1920 tcp_cwnd_validate(sk); 1921 return false; 1922 } 1923 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); 1924 } 1925 1926 bool tcp_schedule_loss_probe(struct sock *sk) 1927 { 1928 struct inet_connection_sock *icsk = inet_csk(sk); 1929 struct tcp_sock *tp = tcp_sk(sk); 1930 u32 timeout, tlp_time_stamp, rto_time_stamp; 1931 u32 rtt = tp->srtt >> 3; 1932 1933 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) 1934 return false; 1935 /* No consecutive loss probes. 
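 * If one is somehow already pending, fall back to rearming the
 * plain RTO timer below instead of stacking a second probe.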
*/ 1936 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 1937 tcp_rearm_rto(sk); 1938 return false; 1939 } 1940 /* Don't do any loss probe on a Fast Open connection before 3WHS 1941 * finishes. 1942 */ 1943 if (sk->sk_state == TCP_SYN_RECV) 1944 return false; 1945 1946 /* TLP is only scheduled when next timer event is RTO. */ 1947 if (icsk->icsk_pending != ICSK_TIME_RETRANS) 1948 return false; 1949 1950 /* Schedule a loss probe in 2*RTT for SACK capable connections 1951 * in Open state, that are either limited by cwnd or application. 1952 */ 1953 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out || 1954 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 1955 return false; 1956 1957 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 1958 tcp_send_head(sk)) 1959 return false; 1960 1961 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 1962 * for delayed ack when there's one outstanding packet. 1963 */ 1964 timeout = rtt << 1; 1965 if (tp->packets_out == 1) 1966 timeout = max_t(u32, timeout, 1967 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 1968 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 1969 1970 /* If RTO is shorter, just schedule TLP in its place. */ 1971 tlp_time_stamp = tcp_time_stamp + timeout; 1972 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 1973 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 1974 s32 delta = rto_time_stamp - tcp_time_stamp; 1975 if (delta > 0) 1976 timeout = delta; 1977 } 1978 1979 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 1980 TCP_RTO_MAX); 1981 return true; 1982 } 1983 1984 /* When probe timeout (PTO) fires, send a new segment if one exists, else 1985 * retransmit the last segment. 1986 */ 1987 void tcp_send_loss_probe(struct sock *sk) 1988 { 1989 struct tcp_sock *tp = tcp_sk(sk); 1990 struct sk_buff *skb; 1991 int pcount; 1992 int mss = tcp_current_mss(sk); 1993 int err = -1; 1994 1995 if (tcp_send_head(sk) != NULL) { 1996 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 1997 goto rearm_timer; 1998 } 1999 2000 /* At most one outstanding TLP retransmission. */ 2001 if (tp->tlp_high_seq) 2002 goto rearm_timer; 2003 2004 /* Retransmit last segment. */ 2005 skb = tcp_write_queue_tail(sk); 2006 if (WARN_ON(!skb)) 2007 goto rearm_timer; 2008 2009 pcount = tcp_skb_pcount(skb); 2010 if (WARN_ON(!pcount)) 2011 goto rearm_timer; 2012 2013 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 2014 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) 2015 goto rearm_timer; 2016 skb = tcp_write_queue_tail(sk); 2017 } 2018 2019 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 2020 goto rearm_timer; 2021 2022 /* Probe with zero data doesn't trigger fast recovery. */ 2023 if (skb->len > 0) 2024 err = __tcp_retransmit_skb(sk, skb); 2025 2026 /* Record snd_nxt for loss detection. */ 2027 if (likely(!err)) 2028 tp->tlp_high_seq = tp->snd_nxt; 2029 2030 rearm_timer: 2031 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2032 inet_csk(sk)->icsk_rto, 2033 TCP_RTO_MAX); 2034 2035 if (likely(!err)) 2036 NET_INC_STATS_BH(sock_net(sk), 2037 LINUX_MIB_TCPLOSSPROBES); 2038 return; 2039 } 2040 2041 /* Push out any pending frames which were held back due to 2042 * TCP_CORK or attempt at coalescing tiny packets. 2043 * The socket must be locked by the caller. 2044 */ 2045 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 2046 int nonagle) 2047 { 2048 /* If we are closed, the bytes will have to remain here. 
2049 * In time closedown will finish, we empty the write queue and
2050 * all will be happy.
2051 */
2052 if (unlikely(sk->sk_state == TCP_CLOSE))
2053 return;
2054
2055 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2056 sk_gfp_atomic(sk, GFP_ATOMIC)))
2057 tcp_check_probe_timer(sk);
2058 }
2059
2060 /* Send the _single_ skb sitting at the send head. Callers that also
2061 * need the probe timer set up must use __tcp_push_pending_frames() instead.
2062 */
2063 void tcp_push_one(struct sock *sk, unsigned int mss_now)
2064 {
2065 struct sk_buff *skb = tcp_send_head(sk);
2066
2067 BUG_ON(!skb || skb->len < mss_now);
2068
2069 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2070 }
2071
2072 /* This function returns the amount that we can raise the
2073 * usable window based on the following constraints
2074 *
2075 * 1. The window can never be shrunk once it is offered (RFC 793)
2076 * 2. We limit memory per socket
2077 *
2078 * RFC 1122:
2079 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2080 * RECV.NEXT + RCV.WIN fixed until:
2081 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2082 *
2083 * i.e. don't raise the right edge of the window until you can raise
2084 * it at least MSS bytes.
2085 *
2086 * Unfortunately, the recommended algorithm breaks header prediction,
2087 * since header prediction assumes th->window stays fixed.
2088 *
2089 * Strictly speaking, keeping th->window fixed violates the receiver
2090 * side SWS prevention criteria. The problem is that under this rule
2091 * a stream of single byte packets will cause the right side of the
2092 * window to always advance by a single byte.
2093 *
2094 * Of course, if the sender implements sender side SWS prevention
2095 * then this will not be a problem.
2096 *
2097 * BSD seems to make the following compromise:
2098 *
2099 * If the free space is less than 1/4 of the maximum
2100 * space available and the free space is less than 1/2 mss,
2101 * then set the window to 0.
2102 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2103 * Otherwise, just prevent the window from shrinking
2104 * and from being larger than the largest representable value.
2105 *
2106 * This prevents incremental opening of the window in the regime
2107 * where TCP is limited by the speed of the reader side taking
2108 * data out of the TCP receive queue. It does nothing about
2109 * those cases where the window is constrained on the sender side
2110 * because the pipeline is full.
2111 *
2112 * BSD also seems to "accidentally" limit itself to windows that are a
2113 * multiple of MSS, at least until the free space gets quite small.
2114 * This would appear to be a side effect of the mbuf implementation.
2115 * Combining these two algorithms results in the observed behavior
2116 * of having a fixed window size at almost all times.
2117 *
2118 * Below we obtain similar behavior by forcing the offered window to
2119 * a multiple of the mss when it is feasible to do so.
2120 *
2121 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2122 * Regular options like TIMESTAMP are taken into account.
2123 */
2124 u32 __tcp_select_window(struct sock *sk)
2125 {
2126 struct inet_connection_sock *icsk = inet_csk(sk);
2127 struct tcp_sock *tp = tcp_sk(sk);
2128 /* MSS for the peer's data. Previous versions used mss_clamp
2129 * here. I don't know if the value based on our guesses
2130 * of peer's MSS is better for the performance.
It's more correct
2131 * but may be worse for the performance because of rcv_mss
2132 * fluctuations. --SAW 1998/11/1
2133 */
2134 int mss = icsk->icsk_ack.rcv_mss;
2135 int free_space = tcp_space(sk);
2136 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
2137 int window;
2138
2139 if (mss > full_space)
2140 mss = full_space;
2141
2142 if (free_space < (full_space >> 1)) {
2143 icsk->icsk_ack.quick = 0;
2144
2145 if (sk_under_memory_pressure(sk))
2146 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2147 4U * tp->advmss);
2148
2149 if (free_space < mss)
2150 return 0;
2151 }
2152
2153 if (free_space > tp->rcv_ssthresh)
2154 free_space = tp->rcv_ssthresh;
2155
2156 /* Don't do rounding if we are using window scaling, since the
2157 * scaled window will not line up with the MSS boundary anyway.
2158 */
2159 window = tp->rcv_wnd;
2160 if (tp->rx_opt.rcv_wscale) {
2161 window = free_space;
2162
2163 /* Advertise enough space so that it won't get scaled away.
2164 * Important case: prevent zero window announcement if
2165 * 1<<rcv_wscale > mss.
2166 */
2167 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2168 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2169 << tp->rx_opt.rcv_wscale);
2170 } else {
2171 /* Get the largest window that is a nice multiple of mss.
2172 * Window clamp already applied above.
2173 * If our current window offering is within 1 mss of the
2174 * free space we just keep it. This prevents the divide
2175 * and multiply from happening most of the time.
2176 * We also don't do any window rounding when the free space
2177 * is too small.
2178 */
2179 if (window <= free_space - mss || window > free_space)
2180 window = (free_space / mss) * mss;
2181 else if (mss == full_space &&
2182 free_space > window + (full_space >> 1))
2183 window = free_space;
2184 }
2185
2186 return window;
2187 }
2188
2189 /* Collapses two adjacent SKBs during retransmission. */
2190 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2191 {
2192 struct tcp_sock *tp = tcp_sk(sk);
2193 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2194 int skb_size, next_skb_size;
2195
2196 skb_size = skb->len;
2197 next_skb_size = next_skb->len;
2198
2199 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
2200
2201 tcp_highest_sack_combine(sk, next_skb, skb);
2202
2203 tcp_unlink_write_queue(next_skb, sk);
2204
2205 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2206 next_skb_size);
2207
2208 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2209 skb->ip_summed = CHECKSUM_PARTIAL;
2210
2211 if (skb->ip_summed != CHECKSUM_PARTIAL)
2212 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
2213
2214 /* Update sequence range on original skb. */
2215 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2216
2217 /* Merge over control information. This moves PSH/FIN etc. over */
2218 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
2219
2220 /* All done, get rid of second SKB and account for it so
2221 * packet counting does not break.
2222 */
2223 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2224
2225 /* changed transmit queue under us so clear hints */
2226 tcp_clear_retrans_hints_partial(tp);
2227 if (next_skb == tp->retransmit_skb_hint)
2228 tp->retransmit_skb_hint = skb;
2229
2230 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2231
2232 sk_wmem_free_skb(sk, next_skb);
2233 }
2234
2235 /* Check if coalescing SKBs is legal.
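 *
 * As checked below, a candidate must cover a single segment
 * (pcount == 1), be fully linear (no paged frags), not be cloned,
 * not be the send head, and not already be SACKed.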
*/
2236 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2237 {
2238 if (tcp_skb_pcount(skb) > 1)
2239 return false;
2240 /* TODO: SACK collapsing could be used to remove this condition */
2241 if (skb_shinfo(skb)->nr_frags != 0)
2242 return false;
2243 if (skb_cloned(skb))
2244 return false;
2245 if (skb == tcp_send_head(sk))
2246 return false;
2247 /* Some heuristics for collapsing over SACK'd data could be invented */
2248 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2249 return false;
2250
2251 return true;
2252 }
2253
2254 /* Collapse packets in the retransmit queue to create fewer packets
2255 * on the wire. This is only done on retransmission.
2256 */
2257 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2258 int space)
2259 {
2260 struct tcp_sock *tp = tcp_sk(sk);
2261 struct sk_buff *skb = to, *tmp;
2262 bool first = true;
2263
2264 if (!sysctl_tcp_retrans_collapse)
2265 return;
2266 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2267 return;
2268
2269 tcp_for_write_queue_from_safe(skb, tmp, sk) {
2270 if (!tcp_can_collapse(sk, skb))
2271 break;
2272
2273 space -= skb->len;
2274
2275 if (first) {
2276 first = false;
2277 continue;
2278 }
2279
2280 if (space < 0)
2281 break;
2282 /* Punt if not enough space exists in the first SKB for
2283 * the data in the second
2284 */
2285 if (skb->len > skb_availroom(to))
2286 break;
2287
2288 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2289 break;
2290
2291 tcp_collapse_retrans(sk, to);
2292 }
2293 }
2294
2295 /* This retransmits one SKB. Policy decisions and retransmit queue
2296 * state updates are done by the caller. Returns non-zero if an
2297 * error occurred which prevented the send.
2298 */
2299 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2300 {
2301 struct tcp_sock *tp = tcp_sk(sk);
2302 struct inet_connection_sock *icsk = inet_csk(sk);
2303 unsigned int cur_mss;
2304
2305 /* Inconclusive MTU probe */
2306 if (icsk->icsk_mtup.probe_size) {
2307 icsk->icsk_mtup.probe_size = 0;
2308 }
2309
2310 /* Do not send more than we queued. 1/4 is reserved for possible
2311 * copying overhead: fragmentation, tunneling, mangling etc.
2312 */
2313 if (atomic_read(&sk->sk_wmem_alloc) >
2314 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2315 return -EAGAIN;
2316
2317 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2318 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2319 BUG();
2320 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2321 return -ENOMEM;
2322 }
2323
2324 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2325 return -EHOSTUNREACH; /* Routing failure or similar. */
2326
2327 cur_mss = tcp_current_mss(sk);
2328
2329 /* If the receiver has shrunk its window, and skb is out of
2330 * the new window, do not retransmit it. The exception is the
2331 * case, when window is shrunk to zero. In this case
2332 * our retransmit serves as a zero window probe.
2333 */
2334 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2335 TCP_SKB_CB(skb)->seq != tp->snd_una)
2336 return -EAGAIN;
2337
2338 if (skb->len > cur_mss) {
2339 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2340 return -ENOMEM; /* We'll try again later.
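 *
 * Failure of tcp_fragment() is harmless here: the skb
 * stays queued untouched and this retransmit is simply
 * retried from the next timer or incoming ACK.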
*/
2341 } else {
2342 int oldpcount = tcp_skb_pcount(skb);
2343
2344 if (unlikely(oldpcount > 1)) {
2345 tcp_init_tso_segs(sk, skb, cur_mss);
2346 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2347 }
2348 }
2349
2350 tcp_retrans_try_collapse(sk, skb, cur_mss);
2351
2352 /* Some Solaris stacks overoptimize and ignore the FIN on a
2353 * retransmit when old data is attached. So strip it off
2354 * since it is cheap to do so and saves bytes on the network.
2355 */
2356 if (skb->len > 0 &&
2357 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2358 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2359 if (!pskb_trim(skb, 0)) {
2360 /* Reuse, even though it does some unnecessary work */
2361 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2362 TCP_SKB_CB(skb)->tcp_flags);
2363 skb->ip_summed = CHECKSUM_NONE;
2364 }
2365 }
2366
2367 /* Make a copy if the SKB clone we made for the first transmission
2368 * is still in somebody's hands; otherwise just clone it again.
2369 */
2370 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2371
2372 /* make sure skb->data is aligned on arches that require it
2373 * and check if ack-trimming & collapsing extended the headroom
2374 * beyond what csum_start can cover.
2375 */
2376 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2377 skb_headroom(skb) >= 0xFFFF)) {
2378 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2379 GFP_ATOMIC);
2380 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2381 -ENOBUFS;
2382 } else {
2383 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2384 }
2385 }
2386
2387 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2388 {
2389 struct tcp_sock *tp = tcp_sk(sk);
2390 int err = __tcp_retransmit_skb(sk, skb);
2391
2392 if (err == 0) {
2393 /* Update global TCP statistics. */
2394 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2395
2396 tp->total_retrans++;
2397
2398 #if FASTRETRANS_DEBUG > 0
2399 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2400 net_dbg_ratelimited("retrans_out leaked\n");
2401 }
2402 #endif
2403 if (!tp->retrans_out)
2404 tp->lost_retrans_low = tp->snd_nxt;
2405 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2406 tp->retrans_out += tcp_skb_pcount(skb);
2407
2408 /* Save stamp of the first retransmit. */
2409 if (!tp->retrans_stamp)
2410 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2411
2412 tp->undo_retrans += tcp_skb_pcount(skb);
2413
2414 /* snd_nxt is stored to detect loss of retransmitted segment,
2415 * see tcp_input.c tcp_sacktag_write_queue().
2416 */
2417 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2418 } else {
2419 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2420 }
2421 return err;
2422 }
2423
2424 /* Check whether forward retransmits are possible in the current
2425 * window/congestion state.
2426 */
2427 static bool tcp_can_forward_retransmit(struct sock *sk)
2428 {
2429 const struct inet_connection_sock *icsk = inet_csk(sk);
2430 const struct tcp_sock *tp = tcp_sk(sk);
2431
2432 /* Forward retransmissions are possible only during Recovery. */
2433 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2434 return false;
2435
2436 /* No forward retransmissions in Reno are possible. */
2437 if (tcp_is_reno(tp))
2438 return false;
2439
2440 /* Yeah, we have to make a difficult choice between forward transmission
2441 * and retransmission... Both ways have their merits...
2442 *
2443 * For now we do not retransmit anything, while we have some new
2444 * segments to send. In the other cases, follow rule 3 for
2445 * NextSeg() specified in RFC3517.
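 *
 * In code terms, that is the tcp_may_send_now() check below:
 * while new data can still be sent, prefer it over forward
 * retransmissions.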
2446 */
2447
2448 if (tcp_may_send_now(sk))
2449 return false;
2450
2451 return true;
2452 }
2453
2454 /* This gets called after a retransmit timeout, and the initially
2455 * retransmitted data is acknowledged. It tries to continue
2456 * resending the rest of the retransmit queue, until either
2457 * we've sent it all or the congestion window limit is reached.
2458 * If doing SACK, the first ACK which comes back for a timeout
2459 * based retransmit packet might feed us FACK information again.
2460 * If so, we use it to avoid unnecessary retransmissions.
2461 */
2462 void tcp_xmit_retransmit_queue(struct sock *sk)
2463 {
2464 const struct inet_connection_sock *icsk = inet_csk(sk);
2465 struct tcp_sock *tp = tcp_sk(sk);
2466 struct sk_buff *skb;
2467 struct sk_buff *hole = NULL;
2468 u32 last_lost;
2469 int mib_idx;
2470 int fwd_rexmitting = 0;
2471
2472 if (!tp->packets_out)
2473 return;
2474
2475 if (!tp->lost_out)
2476 tp->retransmit_high = tp->snd_una;
2477
2478 if (tp->retransmit_skb_hint) {
2479 skb = tp->retransmit_skb_hint;
2480 last_lost = TCP_SKB_CB(skb)->end_seq;
2481 if (after(last_lost, tp->retransmit_high))
2482 last_lost = tp->retransmit_high;
2483 } else {
2484 skb = tcp_write_queue_head(sk);
2485 last_lost = tp->snd_una;
2486 }
2487
2488 tcp_for_write_queue_from(skb, sk) {
2489 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2490
2491 if (skb == tcp_send_head(sk))
2492 break;
2493 /* we could do better than to assign each time */
2494 if (hole == NULL)
2495 tp->retransmit_skb_hint = skb;
2496
2497 /* Assume this retransmit will generate
2498 * only one packet for congestion window
2499 * calculation purposes. This works because
2500 * tcp_retransmit_skb() will chop up the
2501 * packet to be MSS sized and all the
2502 * packet counting works out.
2503 */
2504 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2505 return;
2506
2507 if (fwd_rexmitting) {
2508 begin_fwd:
2509 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2510 break;
2511 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2512
2513 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2514 tp->retransmit_high = last_lost;
2515 if (!tcp_can_forward_retransmit(sk))
2516 break;
2517 /* Backtrack if necessary to non-L'ed skb */
2518 if (hole != NULL) {
2519 skb = hole;
2520 hole = NULL;
2521 }
2522 fwd_rexmitting = 1;
2523 goto begin_fwd;
2524
2525 } else if (!(sacked & TCPCB_LOST)) {
2526 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2527 hole = skb;
2528 continue;
2529
2530 } else {
2531 last_lost = TCP_SKB_CB(skb)->end_seq;
2532 if (icsk->icsk_ca_state != TCP_CA_Loss)
2533 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2534 else
2535 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2536 }
2537
2538 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2539 continue;
2540
2541 if (tcp_retransmit_skb(sk, skb))
2542 return;
2543
2544 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2545
2546 if (tcp_in_cwnd_reduction(sk))
2547 tp->prr_out += tcp_skb_pcount(skb);
2548
2549 if (skb == tcp_write_queue_head(sk))
2550 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2551 inet_csk(sk)->icsk_rto,
2552 TCP_RTO_MAX);
2553 }
2554 }
2555
2556 /* Send a FIN. The caller locks the socket for us. This cannot be
2557 * allowed to fail queueing a FIN frame under any circumstances.
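 *
 * Two cases below: when unsent data is still queued, the FIN flag is
 * simply folded into the tail skb (a FIN consumes a sequence number
 * but carries no payload); otherwise a fresh skb is allocated,
 * looping with yield() until memory is available, since failing to
 * queue the FIN is not an option.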
2558 */ 2559 void tcp_send_fin(struct sock *sk) 2560 { 2561 struct tcp_sock *tp = tcp_sk(sk); 2562 struct sk_buff *skb = tcp_write_queue_tail(sk); 2563 int mss_now; 2564 2565 /* Optimization, tack on the FIN if we have a queue of 2566 * unsent frames. But be careful about outgoing SACKS 2567 * and IP options. 2568 */ 2569 mss_now = tcp_current_mss(sk); 2570 2571 if (tcp_send_head(sk) != NULL) { 2572 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; 2573 TCP_SKB_CB(skb)->end_seq++; 2574 tp->write_seq++; 2575 } else { 2576 /* Socket is locked, keep trying until memory is available. */ 2577 for (;;) { 2578 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2579 sk->sk_allocation); 2580 if (skb) 2581 break; 2582 yield(); 2583 } 2584 2585 /* Reserve space for headers and prepare control bits. */ 2586 skb_reserve(skb, MAX_TCP_HEADER); 2587 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2588 tcp_init_nondata_skb(skb, tp->write_seq, 2589 TCPHDR_ACK | TCPHDR_FIN); 2590 tcp_queue_skb(sk, skb); 2591 } 2592 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2593 } 2594 2595 /* We get here when a process closes a file descriptor (either due to 2596 * an explicit close() or as a byproduct of exit()'ing) and there 2597 * was unread data in the receive queue. This behavior is recommended 2598 * by RFC 2525, section 2.17. -DaveM 2599 */ 2600 void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2601 { 2602 struct sk_buff *skb; 2603 2604 /* NOTE: No TCP options attached and we never retransmit this. */ 2605 skb = alloc_skb(MAX_TCP_HEADER, priority); 2606 if (!skb) { 2607 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2608 return; 2609 } 2610 2611 /* Reserve space for headers and prepare control bits. */ 2612 skb_reserve(skb, MAX_TCP_HEADER); 2613 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2614 TCPHDR_ACK | TCPHDR_RST); 2615 /* Send it off. */ 2616 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2617 if (tcp_transmit_skb(sk, skb, 0, priority)) 2618 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2619 2620 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2621 } 2622 2623 /* Send a crossed SYN-ACK during socket establishment. 2624 * WARNING: This routine must only be called when we have already sent 2625 * a SYN packet that crossed the incoming SYN that caused this routine 2626 * to get called. If this assumption fails then the initial rcv_wnd 2627 * and rcv_wscale values will not be correct. 2628 */ 2629 int tcp_send_synack(struct sock *sk) 2630 { 2631 struct sk_buff *skb; 2632 2633 skb = tcp_write_queue_head(sk); 2634 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2635 pr_debug("%s: wrong queue state\n", __func__); 2636 return -EFAULT; 2637 } 2638 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2639 if (skb_cloned(skb)) { 2640 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2641 if (nskb == NULL) 2642 return -ENOMEM; 2643 tcp_unlink_write_queue(skb, sk); 2644 skb_header_release(nskb); 2645 __tcp_add_write_queue_head(sk, nskb); 2646 sk_wmem_free_skb(sk, skb); 2647 sk->sk_wmem_queued += nskb->truesize; 2648 sk_mem_charge(sk, nskb->truesize); 2649 skb = nskb; 2650 } 2651 2652 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2653 TCP_ECN_send_synack(tcp_sk(sk), skb); 2654 } 2655 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2656 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2657 } 2658 2659 /** 2660 * tcp_make_synack - Prepare a SYN-ACK. 
2661 * @sk: listener socket
2662 * @dst: dst entry attached to the SYNACK
2663 * @req: request_sock pointer
2664 *
2665 * Allocate one skb and build a SYNACK packet.
2666 * @dst is consumed : Caller should not use it again.
2667 */
2668 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2669 struct request_sock *req,
2670 struct tcp_fastopen_cookie *foc)
2671 {
2672 struct tcp_out_options opts;
2673 struct inet_request_sock *ireq = inet_rsk(req);
2674 struct tcp_sock *tp = tcp_sk(sk);
2675 struct tcphdr *th;
2676 struct sk_buff *skb;
2677 struct tcp_md5sig_key *md5;
2678 int tcp_header_size;
2679 int mss;
2680
2681 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2682 if (unlikely(!skb)) {
2683 dst_release(dst);
2684 return NULL;
2685 }
2686 /* Reserve space for headers. */
2687 skb_reserve(skb, MAX_TCP_HEADER);
2688
2689 skb_dst_set(skb, dst);
2690 security_skb_owned_by(skb, sk);
2691
2692 mss = dst_metric_advmss(dst);
2693 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2694 mss = tp->rx_opt.user_mss;
2695
2696 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2697 __u8 rcv_wscale;
2698 /* Set this up on the first call only */
2699 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2700
2701 /* limit the window selection if the user enforces a smaller rx buffer */
2702 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2703 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2704 req->window_clamp = tcp_full_space(sk);
2705
2706 /* tcp_full_space because it is guaranteed to be the first packet */
2707 tcp_select_initial_window(tcp_full_space(sk),
2708 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2709 &req->rcv_wnd,
2710 &req->window_clamp,
2711 ireq->wscale_ok,
2712 &rcv_wscale,
2713 dst_metric(dst, RTAX_INITRWND));
2714 ireq->rcv_wscale = rcv_wscale;
2715 }
2716
2717 memset(&opts, 0, sizeof(opts));
2718 #ifdef CONFIG_SYN_COOKIES
2719 if (unlikely(req->cookie_ts))
2720 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2721 else
2722 #endif
2723 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2724 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2725 foc) + sizeof(*th);
2726
2727 skb_push(skb, tcp_header_size);
2728 skb_reset_transport_header(skb);
2729
2730 th = tcp_hdr(skb);
2731 memset(th, 0, sizeof(struct tcphdr));
2732 th->syn = 1;
2733 th->ack = 1;
2734 TCP_ECN_make_synack(req, th);
2735 th->source = ireq->loc_port;
2736 th->dest = ireq->rmt_port;
2737 /* Setting these flags is superfluous here for callers (and ECE is
2738 * not even correctly set)
2739 */
2740 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2741 TCPHDR_SYN | TCPHDR_ACK);
2742
2743 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2744 /* XXX data is queued and acked as is. No buffer/window check */
2745 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
2746
2747 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2748 th->window = htons(min(req->rcv_wnd, 65535U));
2749 tcp_options_write((__be32 *)(th + 1), tp, &opts);
2750 th->doff = (tcp_header_size >> 2);
2751 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2752
2753 #ifdef CONFIG_TCP_MD5SIG
2754 /* Okay, we have all we need - do the md5 hash if needed */
2755 if (md5) {
2756 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2757 md5, NULL, req, skb);
2758 }
2759 #endif
2760
2761 return skb;
2762 }
2763 EXPORT_SYMBOL(tcp_make_synack);
2764
2765 /* Do all connect socket setups that can be done AF independent.
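 *
 * Called from tcp_connect() before the SYN is built; the
 * address-family specific connect code has already set up the
 * route, so the dst metrics consulted below are valid.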
*/
2766 void tcp_connect_init(struct sock *sk)
2767 {
2768 const struct dst_entry *dst = __sk_dst_get(sk);
2769 struct tcp_sock *tp = tcp_sk(sk);
2770 __u8 rcv_wscale;
2771
2772 /* We'll fix this up when we get a response from the other end.
2773 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2774 */
2775 tp->tcp_header_len = sizeof(struct tcphdr) +
2776 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2777
2778 #ifdef CONFIG_TCP_MD5SIG
2779 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2780 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2781 #endif
2782
2783 /* If the user gave us TCP_MAXSEG, record it as the clamp */
2784 if (tp->rx_opt.user_mss)
2785 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2786 tp->max_window = 0;
2787 tcp_mtup_init(sk);
2788 tcp_sync_mss(sk, dst_mtu(dst));
2789
2790 if (!tp->window_clamp)
2791 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2792 tp->advmss = dst_metric_advmss(dst);
2793 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2794 tp->advmss = tp->rx_opt.user_mss;
2795
2796 tcp_initialize_rcv_mss(sk);
2797
2798 /* limit the window selection if the user enforces a smaller rx buffer */
2799 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2800 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2801 tp->window_clamp = tcp_full_space(sk);
2802
2803 tcp_select_initial_window(tcp_full_space(sk),
2804 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2805 &tp->rcv_wnd,
2806 &tp->window_clamp,
2807 sysctl_tcp_window_scaling,
2808 &rcv_wscale,
2809 dst_metric(dst, RTAX_INITRWND));
2810
2811 tp->rx_opt.rcv_wscale = rcv_wscale;
2812 tp->rcv_ssthresh = tp->rcv_wnd;
2813
2814 sk->sk_err = 0;
2815 sock_reset_flag(sk, SOCK_DONE);
2816 tp->snd_wnd = 0;
2817 tcp_init_wl(tp, 0);
2818 tp->snd_una = tp->write_seq;
2819 tp->snd_sml = tp->write_seq;
2820 tp->snd_up = tp->write_seq;
2821 tp->snd_nxt = tp->write_seq;
2822
2823 if (likely(!tp->repair))
2824 tp->rcv_nxt = 0;
2825 else
2826 tp->rcv_tstamp = tcp_time_stamp;
2827 tp->rcv_wup = tp->rcv_nxt;
2828 tp->copied_seq = tp->rcv_nxt;
2829
2830 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2831 inet_csk(sk)->icsk_retransmits = 0;
2832 tcp_clear_retrans(tp);
2833 }
2834
2835 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2836 {
2837 struct tcp_sock *tp = tcp_sk(sk);
2838 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2839
2840 tcb->end_seq += skb->len;
2841 skb_header_release(skb);
2842 __tcp_add_write_queue_tail(sk, skb);
2843 sk->sk_wmem_queued += skb->truesize;
2844 sk_mem_charge(sk, skb->truesize);
2845 tp->write_seq = tcb->end_seq;
2846 tp->packets_out += tcp_skb_pcount(skb);
2847 }
2848
2849 /* Build and send a SYN with data and (cached) Fast Open cookie. However,
2850 * queue a data-only packet after the regular SYN, such that regular SYNs
2851 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2852 * only the SYN sequence, the data are retransmitted in the first ACK.
2853 * If the cookie is not cached or another error occurs, fall back to sending
2854 * a regular SYN with the Fast Open cookie request option.
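 *
 * As a rough worked example (assuming an IPv4 path MTU of 1500 and no
 * extension headers): __tcp_mtu_to_mss() yields about 1460 and, after
 * reserving MAX_TCP_OPTION_SPACE (40 bytes) for middlebox options, the
 * SYN can carry roughly 1420 bytes of data.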
2855 */
2856 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2857 {
2858 struct tcp_sock *tp = tcp_sk(sk);
2859 struct tcp_fastopen_request *fo = tp->fastopen_req;
2860 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
2861 struct sk_buff *syn_data = NULL, *data;
2862 unsigned long last_syn_loss = 0;
2863
2864 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
2865 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2866 &syn_loss, &last_syn_loss);
2867 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2868 if (syn_loss > 1 &&
2869 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2870 fo->cookie.len = -1;
2871 goto fallback;
2872 }
2873
2874 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2875 fo->cookie.len = -1;
2876 else if (fo->cookie.len <= 0)
2877 goto fallback;
2878
2879 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2880 * user-MSS. Reserve maximum option space for middleboxes that add
2881 * private TCP options. The cost is reduced data space in SYN :(
2882 */
2883 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2884 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2885 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
2886 MAX_TCP_OPTION_SPACE;
2887
2888 syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
2889 sk->sk_allocation);
2890 if (syn_data == NULL)
2891 goto fallback;
2892
2893 for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2894 struct iovec *iov = &fo->data->msg_iov[i];
2895 unsigned char __user *from = iov->iov_base;
2896 int len = iov->iov_len;
2897
2898 if (syn_data->len + len > space)
2899 len = space - syn_data->len;
2900 else if (i + 1 == iovlen)
2901 /* No more data pending in inet_wait_for_connect() */
2902 fo->data = NULL;
2903
2904 if (skb_add_data(syn_data, from, len))
2905 goto fallback;
2906 }
2907
2908 /* Queue a data-only packet after the regular SYN for retransmission */
2909 data = pskb_copy(syn_data, sk->sk_allocation);
2910 if (data == NULL)
2911 goto fallback;
2912 TCP_SKB_CB(data)->seq++;
2913 /* The plain assignment below already clears the copied TCPHDR_SYN. */
2914 TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2915 tcp_connect_queue_skb(sk, data);
2916 fo->copied = data->len;
2917
2918 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
2919 tp->syn_data = (fo->copied > 0);
2920 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2921 goto done;
2922 }
2923 syn_data = NULL;
2924
2925 fallback:
2926 /* Send a regular SYN with Fast Open cookie request option */
2927 if (fo->cookie.len > 0)
2928 fo->cookie.len = 0;
2929 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2930 if (err)
2931 tp->syn_fastopen = 0;
2932 kfree_skb(syn_data);
2933 done:
2934 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
2935 return err;
2936 }
2937
2938 /* Build a SYN and send it off. */
2939 int tcp_connect(struct sock *sk)
2940 {
2941 struct tcp_sock *tp = tcp_sk(sk);
2942 struct sk_buff *buff;
2943 int err;
2944
2945 tcp_connect_init(sk);
2946
2947 if (unlikely(tp->repair)) {
2948 tcp_finish_connect(sk, NULL);
2949 return 0;
2950 }
2951
2952 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2953 if (unlikely(buff == NULL))
2954 return -ENOBUFS;
2955
2956 /* Reserve space for headers.
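 * MAX_TCP_HEADER is sized for worst-case TCP options plus network and
 * link-level headers, so pushing the real headers later never forces a
 * reallocation.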
*/
2957 skb_reserve(buff, MAX_TCP_HEADER);
2958
2959 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2960 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2961 tcp_connect_queue_skb(sk, buff);
2962 TCP_ECN_send_syn(sk, buff);
2963
2964 /* Send off SYN; include data in Fast Open. */
2965 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2966 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2967 if (err == -ECONNREFUSED)
2968 return err;
2969
2970 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2971 * in order to make this packet get counted in tcpOutSegs.
2972 */
2973 tp->snd_nxt = tp->write_seq;
2974 tp->pushed_seq = tp->write_seq;
2975 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2976
2977 /* Timer for repeating the SYN until an answer. */
2978 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2979 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2980 return 0;
2981 }
2982 EXPORT_SYMBOL(tcp_connect);
2983
2984 /* Send out a delayed ack; the caller does the policy checking
2985 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2986 * for details.
2987 */
2988 void tcp_send_delayed_ack(struct sock *sk)
2989 {
2990 struct inet_connection_sock *icsk = inet_csk(sk);
2991 int ato = icsk->icsk_ack.ato;
2992 unsigned long timeout;
2993
2994 if (ato > TCP_DELACK_MIN) {
2995 const struct tcp_sock *tp = tcp_sk(sk);
2996 int max_ato = HZ / 2;
2997
2998 if (icsk->icsk_ack.pingpong ||
2999 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3000 max_ato = TCP_DELACK_MAX;
3001
3002 /* Slow path, intersegment interval is "high". */
3003
3004 /* If some rtt estimate is known, use it to bound delayed ack.
3005 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
3006 * directly.
3007 */
3008 if (tp->srtt) {
3009 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
3010
3011 if (rtt < max_ato)
3012 max_ato = rtt;
3013 }
3014
3015 ato = min(ato, max_ato);
3016 }
3017
3018 /* Stay within the limit we were given */
3019 timeout = jiffies + ato;
3020
3021 /* Use the new timeout only if there wasn't an older one already. */
3022 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3023 /* If delack timer was blocked or is about to expire,
3024 * send ACK now.
3025 */
3026 if (icsk->icsk_ack.blocked ||
3027 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3028 tcp_send_ack(sk);
3029 return;
3030 }
3031
3032 if (!time_before(timeout, icsk->icsk_ack.timeout))
3033 timeout = icsk->icsk_ack.timeout;
3034 }
3035 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3036 icsk->icsk_ack.timeout = timeout;
3037 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3038 }
3039
3040 /* This routine sends an ack and also updates the window. */
3041 void tcp_send_ack(struct sock *sk)
3042 {
3043 struct sk_buff *buff;
3044
3045 /* If we have been reset, we may not send again. */
3046 if (sk->sk_state == TCP_CLOSE)
3047 return;
3048
3049 /* We are not putting this on the write queue, so
3050 * tcp_transmit_skb() will set the ownership to this
3051 * sock.
3052 */
3053 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3054 if (buff == NULL) {
3055 inet_csk_schedule_ack(sk);
3056 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3057 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3058 TCP_DELACK_MAX, TCP_RTO_MAX);
3059 return;
3060 }
3061
3062 /* Reserve space for headers and prepare control bits.
*/
3063 skb_reserve(buff, MAX_TCP_HEADER);
3064 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3065
3066 /* Send it off, this clears delayed acks for us. */
3067 TCP_SKB_CB(buff)->when = tcp_time_stamp;
3068 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
3069 }
3070
3071 /* This routine sends a packet with an out of date sequence
3072 * number. It assumes the other end will try to ack it.
3073 *
3074 * Question: what should we do in urgent mode?
3075 * 4.4BSD forces sending a single byte of data. We cannot send
3076 * out of window data, because we have SND.NXT==SND.MAX...
3077 *
3078 * Current solution: to send TWO zero-length segments in urgent mode:
3079 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3080 * out-of-date with SND.UNA-1 to probe window.
3081 */
3082 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3083 {
3084 struct tcp_sock *tp = tcp_sk(sk);
3085 struct sk_buff *skb;
3086
3087 /* We don't queue it, tcp_transmit_skb() sets ownership. */
3088 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
3089 if (skb == NULL)
3090 return -1;
3091
3092 /* Reserve space for headers and set control bits. */
3093 skb_reserve(skb, MAX_TCP_HEADER);
3094 /* Use a previous sequence. This should cause the other
3095 * end to send an ack. Don't queue or clone SKB, just
3096 * send it.
3097 */
3098 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3099 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3100 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
3101 }
3102
3103 void tcp_send_window_probe(struct sock *sk)
3104 {
3105 if (sk->sk_state == TCP_ESTABLISHED) {
3106 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3107 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
3108 tcp_xmit_probe_skb(sk, 0);
3109 }
3110 }
3111
3112 /* Initiate keepalive or window probe from timer. */
3113 int tcp_write_wakeup(struct sock *sk)
3114 {
3115 struct tcp_sock *tp = tcp_sk(sk);
3116 struct sk_buff *skb;
3117
3118 if (sk->sk_state == TCP_CLOSE)
3119 return -1;
3120
3121 if ((skb = tcp_send_head(sk)) != NULL &&
3122 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3123 int err;
3124 unsigned int mss = tcp_current_mss(sk);
3125 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3126
3127 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3128 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
3129
3130 /* We are probing the opening of a window but the window
3131 * size is != 0; this must have been the result of SWS
3132 * avoidance (on the sender side).
3133 */
3134 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3135 skb->len > mss) {
3136 seg_size = min(seg_size, mss);
3137 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3138 if (tcp_fragment(sk, skb, seg_size, mss))
3139 return -1;
3140 } else if (!tcp_skb_pcount(skb))
3141 tcp_set_skb_tso_segs(sk, skb, mss);
3142
3143 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3144 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3145 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3146 if (!err)
3147 tcp_event_new_data_sent(sk, skb);
3148 return err;
3149 } else {
3150 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3151 tcp_xmit_probe_skb(sk, 1);
3152 return tcp_xmit_probe_skb(sk, 0);
3153 }
3154 }
3155
3156 /* A window probe timeout has occurred. If the window is not closed,
3157 * send a partial packet, else send a zero-window probe.
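 *
 * The timer below backs off exponentially on repeated failures: with
 * an assumed initial icsk_rto of 200ms the probes go out roughly
 * 200ms, 400ms, 800ms, ... apart, capped at TCP_RTO_MAX.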
3158 */
3159 void tcp_send_probe0(struct sock *sk)
3160 {
3161 struct inet_connection_sock *icsk = inet_csk(sk);
3162 struct tcp_sock *tp = tcp_sk(sk);
3163 int err;
3164
3165 err = tcp_write_wakeup(sk);
3166
3167 if (tp->packets_out || !tcp_send_head(sk)) {
3168 /* Cancel probe timer, if it is not required. */
3169 icsk->icsk_probes_out = 0;
3170 icsk->icsk_backoff = 0;
3171 return;
3172 }
3173
3174 if (err <= 0) {
3175 if (icsk->icsk_backoff < sysctl_tcp_retries2)
3176 icsk->icsk_backoff++;
3177 icsk->icsk_probes_out++;
3178 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3179 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3180 TCP_RTO_MAX);
3181 } else {
3182 /* If the packet was not sent due to local congestion,
3183 * do not back off and do not remember icsk_probes_out.
3184 * Let local senders fight for local resources.
3185 *
3186 * But still use the accumulated backoff.
3187 */
3188 if (!icsk->icsk_probes_out)
3189 icsk->icsk_probes_out = 1;
3190 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3191 min(icsk->icsk_rto << icsk->icsk_backoff,
3192 TCP_RESOURCE_PROBE_INTERVAL),
3193 TCP_RTO_MAX);
3194 }
3195 }
3196