1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Florian La Roche, <flla@stud.uni-sb.de> 14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 15 * Linus Torvalds, <torvalds@cs.helsinki.fi> 16 * Alan Cox, <gw4pts@gw4pts.ampr.org> 17 * Matthew Dillon, <dillon@apollo.west.oic.com> 18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 19 * Jorge Cwik, <jorge@laser.satlink.net> 20 */ 21 22 /* 23 * Changes: Pedro Roque : Retransmit queue handled by TCP. 24 * : Fragmentation on mtu decrease 25 * : Segment collapse on retransmit 26 * : AF independence 27 * 28 * Linus Torvalds : send_delayed_ack 29 * David S. Miller : Charge memory using the right skb 30 * during syn/ack processing. 31 * David S. Miller : Output engine completely rewritten. 32 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. 33 * Cacophonix Gaul : draft-minshall-nagle-01 34 * J Hadi Salim : ECN support 35 * 36 */ 37 38 #define pr_fmt(fmt) "TCP: " fmt 39 40 #include <net/tcp.h> 41 #include <net/tcp_ecn.h> 42 #include <net/mptcp.h> 43 #include <net/smc.h> 44 #include <net/proto_memory.h> 45 #include <net/psp.h> 46 47 #include <linux/compiler.h> 48 #include <linux/gfp.h> 49 #include <linux/module.h> 50 #include <linux/static_key.h> 51 #include <linux/skbuff_ref.h> 52 53 #include <trace/events/tcp.h> 54 55 /* Refresh clocks of a TCP socket, 56 * ensuring monotically increasing values. 57 */ 58 void tcp_mstamp_refresh(struct tcp_sock *tp) 59 { 60 u64 val = tcp_clock_ns(); 61 62 tp->tcp_clock_cache = val; 63 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); 64 } 65 66 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 67 int push_one, gfp_t gfp); 68 69 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */ 70 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) 71 { 72 struct rb_node **p = &root->rb_node; 73 struct rb_node *parent = NULL; 74 struct sk_buff *skb1; 75 76 while (*p) { 77 parent = *p; 78 skb1 = rb_to_skb(parent); 79 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) 80 p = &parent->rb_left; 81 else 82 p = &parent->rb_right; 83 } 84 rb_link_node(&skb->rbnode, parent, p); 85 rb_insert_color(&skb->rbnode, root); 86 } 87 88 /* Account for new data that has been sent to the network. */ 89 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 90 { 91 struct inet_connection_sock *icsk = inet_csk(sk); 92 struct tcp_sock *tp = tcp_sk(sk); 93 unsigned int prior_packets = tp->packets_out; 94 95 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); 96 97 __skb_unlink(skb, &sk->sk_write_queue); 98 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); 99 100 if (tp->highest_sack == NULL) 101 tp->highest_sack = skb; 102 103 tp->packets_out += tcp_skb_pcount(skb); 104 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) 105 tcp_rearm_rto(sk); 106 107 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT, 108 tcp_skb_pcount(skb)); 109 tcp_check_space(sk); 110 } 111 112 /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one 113 * window scaling factor due to loss of precision. 114 * If window has been shrunk, what should we make? It is not clear at all. 115 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( 116 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 117 * invalid. OK, let's make this for now: 118 */ 119 static inline __u32 tcp_acceptable_seq(const struct sock *sk) 120 { 121 const struct tcp_sock *tp = tcp_sk(sk); 122 123 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || 124 (tp->rx_opt.wscale_ok && 125 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) 126 return tp->snd_nxt; 127 else 128 return tcp_wnd_end(tp); 129 } 130 131 /* Calculate mss to advertise in SYN segment. 132 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: 133 * 134 * 1. It is independent of path mtu. 135 * 2. Ideally, it is maximal possible segment size i.e. 65535-40. 136 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of 137 * attached devices, because some buggy hosts are confused by 138 * large MSS. 139 * 4. We do not make 3, we advertise MSS, calculated from first 140 * hop device mtu, but allow to raise it to ip_rt_min_advmss. 141 * This may be overridden via information stored in routing table. 142 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, 143 * probably even Jumbo". 144 */ 145 static __u16 tcp_advertise_mss(struct sock *sk) 146 { 147 struct tcp_sock *tp = tcp_sk(sk); 148 const struct dst_entry *dst = __sk_dst_get(sk); 149 int mss = tp->advmss; 150 151 if (dst) { 152 unsigned int metric = dst_metric_advmss(dst); 153 154 if (metric < mss) { 155 mss = metric; 156 tp->advmss = mss; 157 } 158 } 159 160 return (__u16)mss; 161 } 162 163 /* RFC2861. Reset CWND after idle period longer RTO to "restart window". 164 * This is the first part of cwnd validation mechanism. 165 */ 166 void tcp_cwnd_restart(struct sock *sk, s32 delta) 167 { 168 struct tcp_sock *tp = tcp_sk(sk); 169 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); 170 u32 cwnd = tcp_snd_cwnd(tp); 171 172 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 173 174 WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); 175 restart_cwnd = min(restart_cwnd, cwnd); 176 177 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 178 cwnd >>= 1; 179 tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd)); 180 tp->snd_cwnd_stamp = tcp_jiffies32; 181 tp->snd_cwnd_used = 0; 182 } 183 184 /* Congestion state accounting after a packet has been sent. */ 185 static void tcp_event_data_sent(struct tcp_sock *tp, 186 struct sock *sk) 187 { 188 struct inet_connection_sock *icsk = inet_csk(sk); 189 const u32 now = tcp_jiffies32; 190 191 if (tcp_packets_in_flight(tp) == 0) 192 tcp_ca_event(sk, CA_EVENT_TX_START); 193 194 tp->lsndtime = now; 195 196 /* If it is a reply for ato after last received 197 * packet, increase pingpong count. 198 */ 199 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 200 inet_csk_inc_pingpong_cnt(sk); 201 } 202 203 /* Account for an ACK we sent. */ 204 static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt) 205 { 206 struct tcp_sock *tp = tcp_sk(sk); 207 208 if (unlikely(tp->compressed_ack)) { 209 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, 210 tp->compressed_ack); 211 tp->compressed_ack = 0; 212 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 213 __sock_put(sk); 214 } 215 216 if (unlikely(rcv_nxt != tp->rcv_nxt)) 217 return; /* Special ACK sent by DCTCP to reflect ECN */ 218 tcp_dec_quickack_mode(sk); 219 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 220 } 221 222 /* Determine a window scaling and initial window to offer. 223 * Based on the assumption that the given amount of space 224 * will be offered. Store the results in the tp structure. 225 * NOTE: for smooth operation initial space offering should 226 * be a multiple of mss if possible. We assume here that mss >= 1. 227 * This MUST be enforced by all callers. 228 */ 229 void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, 230 __u32 *rcv_wnd, __u32 *__window_clamp, 231 int wscale_ok, __u8 *rcv_wscale, 232 __u32 init_rcv_wnd) 233 { 234 unsigned int space = (__space < 0 ? 0 : __space); 235 u32 window_clamp = READ_ONCE(*__window_clamp); 236 237 /* If no clamp set the clamp to the max possible scaled window */ 238 if (window_clamp == 0) 239 window_clamp = (U16_MAX << TCP_MAX_WSCALE); 240 space = min(window_clamp, space); 241 242 /* Quantize space offering to a multiple of mss if possible. */ 243 if (space > mss) 244 space = rounddown(space, mss); 245 246 /* NOTE: offering an initial window larger than 32767 247 * will break some buggy TCP stacks. If the admin tells us 248 * it is likely we could be speaking with such a buggy stack 249 * we will truncate our initial window offering to 32K-1 250 * unless the remote has sent us a window scaling option, 251 * which we interpret as a sign the remote TCP is not 252 * misinterpreting the window field as a signed quantity. 253 */ 254 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) 255 (*rcv_wnd) = min(space, MAX_TCP_WINDOW); 256 else 257 (*rcv_wnd) = space; 258 259 if (init_rcv_wnd) 260 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); 261 262 *rcv_wscale = 0; 263 if (wscale_ok) { 264 /* Set window scaling on max possible window */ 265 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); 266 space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); 267 space = min_t(u32, space, window_clamp); 268 *rcv_wscale = clamp_t(int, ilog2(space) - 15, 269 0, TCP_MAX_WSCALE); 270 } 271 /* Set the clamp no higher than max representable value */ 272 WRITE_ONCE(*__window_clamp, 273 min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp)); 274 } 275 276 /* Chose a new window to advertise, update state in tcp_sock for the 277 * socket, and return result with RFC1323 scaling applied. The return 278 * value can be stuffed directly into th->window for an outgoing 279 * frame. 280 */ 281 static u16 tcp_select_window(struct sock *sk) 282 { 283 struct tcp_sock *tp = tcp_sk(sk); 284 struct net *net = sock_net(sk); 285 u32 old_win = tp->rcv_wnd; 286 u32 cur_win, new_win; 287 288 /* Make the window 0 if we failed to queue the data because we 289 * are out of memory. 290 */ 291 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) { 292 tp->pred_flags = 0; 293 tp->rcv_wnd = 0; 294 tp->rcv_wup = tp->rcv_nxt; 295 tcp_update_max_rcv_wnd_seq(tp); 296 return 0; 297 } 298 299 cur_win = tcp_receive_window(tp); 300 new_win = __tcp_select_window(sk); 301 if (new_win < cur_win) { 302 /* Danger Will Robinson! 303 * Don't update rcv_wup/rcv_wnd here or else 304 * we will not be able to advertise a zero 305 * window in time. --DaveM 306 * 307 * Relax Will Robinson. 308 */ 309 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { 310 /* Never shrink the offered window */ 311 if (new_win == 0) 312 NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV); 313 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); 314 } 315 } 316 317 tp->rcv_wnd = new_win; 318 tp->rcv_wup = tp->rcv_nxt; 319 tcp_update_max_rcv_wnd_seq(tp); 320 321 /* Make sure we do not exceed the maximum possible 322 * scaled window. 323 */ 324 if (!tp->rx_opt.rcv_wscale && 325 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) 326 new_win = min(new_win, MAX_TCP_WINDOW); 327 else 328 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); 329 330 /* RFC1323 scaling applied */ 331 new_win >>= tp->rx_opt.rcv_wscale; 332 333 /* If we advertise zero window, disable fast path. */ 334 if (new_win == 0) { 335 tp->pred_flags = 0; 336 if (old_win) 337 NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV); 338 } else if (old_win == 0) { 339 NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV); 340 } 341 342 return new_win; 343 } 344 345 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to 346 * be sent. 347 */ 348 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, 349 struct tcphdr *th, int tcp_header_len) 350 { 351 struct tcp_sock *tp = tcp_sk(sk); 352 353 if (!tcp_ecn_mode_any(tp)) 354 return; 355 356 if (tcp_ecn_mode_accecn(tp)) { 357 if (!tcp_accecn_ace_fail_recv(tp) && 358 !tcp_accecn_ace_fail_send(tp)) 359 INET_ECN_xmit(sk); 360 else 361 INET_ECN_dontxmit(sk); 362 tcp_accecn_set_ace(tp, skb, th); 363 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ACCECN; 364 } else { 365 /* Not-retransmitted data segment: set ECT and inject CWR. */ 366 if (skb->len != tcp_header_len && 367 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { 368 INET_ECN_xmit(sk); 369 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { 370 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 371 th->cwr = 1; 372 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 373 } 374 } else if (!tcp_ca_needs_ecn(sk)) { 375 /* ACK or retransmitted segment: clear ECT|CE */ 376 INET_ECN_dontxmit(sk); 377 } 378 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) 379 th->ece = 1; 380 } 381 } 382 383 /* Constructs common control bits of non-data skb. If SYN/FIN is present, 384 * auto increment end seqno. 385 */ 386 static void tcp_init_nondata_skb(struct sk_buff *skb, struct sock *sk, 387 u32 seq, u16 flags) 388 { 389 skb->ip_summed = CHECKSUM_PARTIAL; 390 391 TCP_SKB_CB(skb)->tcp_flags = flags; 392 393 tcp_skb_pcount_set(skb, 1); 394 psp_enqueue_set_decrypted(sk, skb); 395 396 TCP_SKB_CB(skb)->seq = seq; 397 if (flags & (TCPHDR_SYN | TCPHDR_FIN)) 398 seq++; 399 TCP_SKB_CB(skb)->end_seq = seq; 400 } 401 402 static inline bool tcp_urg_mode(const struct tcp_sock *tp) 403 { 404 return tp->snd_una != tp->snd_up; 405 } 406 407 #define OPTION_SACK_ADVERTISE BIT(0) 408 #define OPTION_TS BIT(1) 409 #define OPTION_MD5 BIT(2) 410 #define OPTION_WSCALE BIT(3) 411 #define OPTION_FAST_OPEN_COOKIE BIT(8) 412 #define OPTION_SMC BIT(9) 413 #define OPTION_MPTCP BIT(10) 414 #define OPTION_AO BIT(11) 415 #define OPTION_ACCECN BIT(12) 416 417 static void smc_options_write(__be32 *ptr, u16 *options) 418 { 419 #if IS_ENABLED(CONFIG_SMC) 420 if (static_branch_unlikely(&tcp_have_smc)) { 421 if (unlikely(OPTION_SMC & *options)) { 422 *ptr++ = htonl((TCPOPT_NOP << 24) | 423 (TCPOPT_NOP << 16) | 424 (TCPOPT_EXP << 8) | 425 (TCPOLEN_EXP_SMC_BASE)); 426 *ptr++ = htonl(TCPOPT_SMC_MAGIC); 427 } 428 } 429 #endif 430 } 431 432 struct tcp_out_options { 433 /* Following group is cleared in __tcp_transmit_skb() */ 434 struct_group(cleared, 435 u16 mss; /* 0 to disable */ 436 u8 bpf_opt_len; /* length of BPF hdr option */ 437 u8 num_sack_blocks; /* number of SACK blocks to include */ 438 ); 439 440 /* Caution: following fields are not cleared in __tcp_transmit_skb() */ 441 u16 options; /* bit field of OPTION_* */ 442 u8 ws; /* window scale, 0 to disable */ 443 u8 num_accecn_fields:7, /* number of AccECN fields needed */ 444 use_synack_ecn_bytes:1; /* Use synack_ecn_bytes or not */ 445 __u8 *hash_location; /* temporary pointer, overloaded */ 446 __u32 tsval, tsecr; /* need to include OPTION_TS */ 447 struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ 448 struct mptcp_out_options mptcp; 449 }; 450 451 static void mptcp_options_write(struct tcphdr *th, __be32 *ptr, 452 struct tcp_sock *tp, 453 struct tcp_out_options *opts) 454 { 455 #if IS_ENABLED(CONFIG_MPTCP) 456 if (unlikely(OPTION_MPTCP & opts->options)) 457 mptcp_write_options(th, ptr, tp, &opts->mptcp); 458 #endif 459 } 460 461 #ifdef CONFIG_CGROUP_BPF 462 static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb, 463 enum tcp_synack_type synack_type) 464 { 465 if (unlikely(!skb)) 466 return BPF_WRITE_HDR_TCP_CURRENT_MSS; 467 468 if (unlikely(synack_type == TCP_SYNACK_COOKIE)) 469 return BPF_WRITE_HDR_TCP_SYNACK_COOKIE; 470 471 return 0; 472 } 473 474 /* req, syn_skb and synack_type are used when writing synack */ 475 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, 476 struct request_sock *req, 477 struct sk_buff *syn_skb, 478 enum tcp_synack_type synack_type, 479 struct tcp_out_options *opts, 480 unsigned int *remaining) 481 { 482 struct bpf_sock_ops_kern sock_ops; 483 int err; 484 485 if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), 486 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) || 487 !*remaining) 488 return; 489 490 /* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */ 491 492 /* init sock_ops */ 493 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 494 495 sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB; 496 497 if (req) { 498 /* The listen "sk" cannot be passed here because 499 * it is not locked. It would not make too much 500 * sense to do bpf_setsockopt(listen_sk) based 501 * on individual connection request also. 502 * 503 * Thus, "req" is passed here and the cgroup-bpf-progs 504 * of the listen "sk" will be run. 505 * 506 * "req" is also used here for fastopen even the "sk" here is 507 * a fullsock "child" sk. It is to keep the behavior 508 * consistent between fastopen and non-fastopen on 509 * the bpf programming side. 510 */ 511 sock_ops.sk = (struct sock *)req; 512 sock_ops.syn_skb = syn_skb; 513 } else { 514 sock_owned_by_me(sk); 515 516 sock_ops.is_fullsock = 1; 517 sock_ops.is_locked_tcp_sock = 1; 518 sock_ops.sk = sk; 519 } 520 521 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type); 522 sock_ops.remaining_opt_len = *remaining; 523 /* tcp_current_mss() does not pass a skb */ 524 if (skb) 525 bpf_skops_init_skb(&sock_ops, skb, 0); 526 527 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); 528 529 if (err || sock_ops.remaining_opt_len == *remaining) 530 return; 531 532 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; 533 /* round up to 4 bytes */ 534 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; 535 536 *remaining -= opts->bpf_opt_len; 537 } 538 539 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, 540 struct request_sock *req, 541 struct sk_buff *syn_skb, 542 enum tcp_synack_type synack_type, 543 struct tcp_out_options *opts) 544 { 545 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; 546 struct bpf_sock_ops_kern sock_ops; 547 int err; 548 549 if (likely(!max_opt_len)) 550 return; 551 552 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 553 554 sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB; 555 556 if (req) { 557 sock_ops.sk = (struct sock *)req; 558 sock_ops.syn_skb = syn_skb; 559 } else { 560 sock_owned_by_me(sk); 561 562 sock_ops.is_fullsock = 1; 563 sock_ops.is_locked_tcp_sock = 1; 564 sock_ops.sk = sk; 565 } 566 567 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type); 568 sock_ops.remaining_opt_len = max_opt_len; 569 first_opt_off = tcp_hdrlen(skb) - max_opt_len; 570 bpf_skops_init_skb(&sock_ops, skb, first_opt_off); 571 572 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk); 573 574 if (err) 575 nr_written = 0; 576 else 577 nr_written = max_opt_len - sock_ops.remaining_opt_len; 578 579 if (nr_written < max_opt_len) 580 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, 581 max_opt_len - nr_written); 582 } 583 #else 584 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, 585 struct request_sock *req, 586 struct sk_buff *syn_skb, 587 enum tcp_synack_type synack_type, 588 struct tcp_out_options *opts, 589 unsigned int *remaining) 590 { 591 } 592 593 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, 594 struct request_sock *req, 595 struct sk_buff *syn_skb, 596 enum tcp_synack_type synack_type, 597 struct tcp_out_options *opts) 598 { 599 } 600 #endif 601 602 static __be32 *process_tcp_ao_options(struct tcp_sock *tp, 603 const struct tcp_request_sock *tcprsk, 604 struct tcp_out_options *opts, 605 struct tcp_key *key, __be32 *ptr) 606 { 607 #ifdef CONFIG_TCP_AO 608 u8 maclen = tcp_ao_maclen(key->ao_key); 609 610 if (tcprsk) { 611 u8 aolen = maclen + sizeof(struct tcp_ao_hdr); 612 613 *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) | 614 (tcprsk->ao_keyid << 8) | 615 (tcprsk->ao_rcv_next)); 616 } else { 617 struct tcp_ao_key *rnext_key; 618 struct tcp_ao_info *ao_info; 619 620 ao_info = rcu_dereference_check(tp->ao_info, 621 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); 622 rnext_key = READ_ONCE(ao_info->rnext_key); 623 if (WARN_ON_ONCE(!rnext_key)) 624 return ptr; 625 *ptr++ = htonl((TCPOPT_AO << 24) | 626 (tcp_ao_len(key->ao_key) << 16) | 627 (key->ao_key->sndid << 8) | 628 (rnext_key->rcvid)); 629 } 630 opts->hash_location = (__u8 *)ptr; 631 ptr += maclen / sizeof(*ptr); 632 if (unlikely(maclen % sizeof(*ptr))) { 633 memset(ptr, TCPOPT_NOP, sizeof(*ptr)); 634 ptr++; 635 } 636 #endif 637 return ptr; 638 } 639 640 /* Initial values for AccECN option, ordered is based on ECN field bits 641 * similar to received_ecn_bytes. Used for SYN/ACK AccECN option. 642 */ 643 static const u32 synack_ecn_bytes[3] = { 0, 0, 0 }; 644 645 /* Write previously computed TCP options to the packet. 646 * 647 * Beware: Something in the Internet is very sensitive to the ordering of 648 * TCP options, we learned this through the hard way, so be careful here. 649 * Luckily we can at least blame others for their non-compliance but from 650 * inter-operability perspective it seems that we're somewhat stuck with 651 * the ordering which we have been using if we want to keep working with 652 * those broken things (not that it currently hurts anybody as there isn't 653 * particular reason why the ordering would need to be changed). 654 * 655 * At least SACK_PERM as the first option is known to lead to a disaster 656 * (but it may well be that other scenarios fail similarly). 657 */ 658 static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp, 659 const struct tcp_request_sock *tcprsk, 660 struct tcp_out_options *opts, 661 struct tcp_key *key) 662 { 663 u8 leftover_highbyte = TCPOPT_NOP; /* replace 1st NOP if avail */ 664 u8 leftover_lowbyte = TCPOPT_NOP; /* replace 2nd NOP in succession */ 665 __be32 *ptr = (__be32 *)(th + 1); 666 u16 options = opts->options; /* mungable copy */ 667 668 if (tcp_key_is_md5(key)) { 669 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 670 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); 671 /* overload cookie hash location */ 672 opts->hash_location = (__u8 *)ptr; 673 ptr += 4; 674 } else if (tcp_key_is_ao(key)) { 675 ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr); 676 } 677 if (unlikely(opts->mss)) { 678 *ptr++ = htonl((TCPOPT_MSS << 24) | 679 (TCPOLEN_MSS << 16) | 680 opts->mss); 681 } 682 683 if (likely(OPTION_TS & options)) { 684 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 685 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 686 (TCPOLEN_SACK_PERM << 16) | 687 (TCPOPT_TIMESTAMP << 8) | 688 TCPOLEN_TIMESTAMP); 689 options &= ~OPTION_SACK_ADVERTISE; 690 } else { 691 *ptr++ = htonl((TCPOPT_NOP << 24) | 692 (TCPOPT_NOP << 16) | 693 (TCPOPT_TIMESTAMP << 8) | 694 TCPOLEN_TIMESTAMP); 695 } 696 *ptr++ = htonl(opts->tsval); 697 *ptr++ = htonl(opts->tsecr); 698 } 699 700 if (OPTION_ACCECN & options) { 701 const u32 *ecn_bytes = opts->use_synack_ecn_bytes ? 702 synack_ecn_bytes : 703 tp->received_ecn_bytes; 704 const u8 ect0_idx = INET_ECN_ECT_0 - 1; 705 const u8 ect1_idx = INET_ECN_ECT_1 - 1; 706 const u8 ce_idx = INET_ECN_CE - 1; 707 u32 e0b; 708 u32 e1b; 709 u32 ceb; 710 u8 len; 711 712 e0b = ecn_bytes[ect0_idx] + TCP_ACCECN_E0B_INIT_OFFSET; 713 e1b = ecn_bytes[ect1_idx] + TCP_ACCECN_E1B_INIT_OFFSET; 714 ceb = ecn_bytes[ce_idx] + TCP_ACCECN_CEB_INIT_OFFSET; 715 len = TCPOLEN_ACCECN_BASE + 716 opts->num_accecn_fields * TCPOLEN_ACCECN_PERFIELD; 717 718 if (opts->num_accecn_fields == 2) { 719 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | 720 ((e1b >> 8) & 0xffff)); 721 *ptr++ = htonl(((e1b & 0xff) << 24) | 722 (ceb & 0xffffff)); 723 } else if (opts->num_accecn_fields == 1) { 724 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | 725 ((e1b >> 8) & 0xffff)); 726 leftover_highbyte = e1b & 0xff; 727 leftover_lowbyte = TCPOPT_NOP; 728 } else if (opts->num_accecn_fields == 0) { 729 leftover_highbyte = TCPOPT_ACCECN1; 730 leftover_lowbyte = len; 731 } else if (opts->num_accecn_fields == 3) { 732 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) | 733 ((e1b >> 8) & 0xffff)); 734 *ptr++ = htonl(((e1b & 0xff) << 24) | 735 (ceb & 0xffffff)); 736 *ptr++ = htonl(((e0b & 0xffffff) << 8) | 737 TCPOPT_NOP); 738 } 739 if (tp) { 740 tp->accecn_minlen = 0; 741 tp->accecn_opt_tstamp = tp->tcp_mstamp; 742 tp->accecn_opt_sent_w_dsack = tp->rx_opt.dsack; 743 if (tp->accecn_opt_demand) 744 tp->accecn_opt_demand--; 745 } 746 } else if (tp) { 747 tp->accecn_opt_sent_w_dsack = 0; 748 } 749 750 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 751 *ptr++ = htonl((leftover_highbyte << 24) | 752 (leftover_lowbyte << 16) | 753 (TCPOPT_SACK_PERM << 8) | 754 TCPOLEN_SACK_PERM); 755 leftover_highbyte = TCPOPT_NOP; 756 leftover_lowbyte = TCPOPT_NOP; 757 } 758 759 if (unlikely(OPTION_WSCALE & options)) { 760 u8 highbyte = TCPOPT_NOP; 761 762 /* Do not split the leftover 2-byte to fit into a single 763 * NOP, i.e., replace this NOP only when 1 byte is leftover 764 * within leftover_highbyte. 765 */ 766 if (unlikely(leftover_highbyte != TCPOPT_NOP && 767 leftover_lowbyte == TCPOPT_NOP)) { 768 highbyte = leftover_highbyte; 769 leftover_highbyte = TCPOPT_NOP; 770 } 771 *ptr++ = htonl((highbyte << 24) | 772 (TCPOPT_WINDOW << 16) | 773 (TCPOLEN_WINDOW << 8) | 774 opts->ws); 775 } 776 777 if (unlikely(opts->num_sack_blocks)) { 778 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 779 tp->duplicate_sack : tp->selective_acks; 780 int this_sack; 781 782 *ptr++ = htonl((leftover_highbyte << 24) | 783 (leftover_lowbyte << 16) | 784 (TCPOPT_SACK << 8) | 785 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 786 TCPOLEN_SACK_PERBLOCK))); 787 leftover_highbyte = TCPOPT_NOP; 788 leftover_lowbyte = TCPOPT_NOP; 789 790 for (this_sack = 0; this_sack < opts->num_sack_blocks; 791 ++this_sack) { 792 *ptr++ = htonl(sp[this_sack].start_seq); 793 *ptr++ = htonl(sp[this_sack].end_seq); 794 } 795 796 tp->rx_opt.dsack = 0; 797 } else if (unlikely(leftover_highbyte != TCPOPT_NOP || 798 leftover_lowbyte != TCPOPT_NOP)) { 799 *ptr++ = htonl((leftover_highbyte << 24) | 800 (leftover_lowbyte << 16) | 801 (TCPOPT_NOP << 8) | 802 TCPOPT_NOP); 803 leftover_highbyte = TCPOPT_NOP; 804 leftover_lowbyte = TCPOPT_NOP; 805 } 806 807 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) { 808 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; 809 u8 *p = (u8 *)ptr; 810 u32 len; /* Fast Open option length */ 811 812 if (foc->exp) { 813 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; 814 *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) | 815 TCPOPT_FASTOPEN_MAGIC); 816 p += TCPOLEN_EXP_FASTOPEN_BASE; 817 } else { 818 len = TCPOLEN_FASTOPEN_BASE + foc->len; 819 *p++ = TCPOPT_FASTOPEN; 820 *p++ = len; 821 } 822 823 memcpy(p, foc->val, foc->len); 824 if ((len & 3) == 2) { 825 p[foc->len] = TCPOPT_NOP; 826 p[foc->len + 1] = TCPOPT_NOP; 827 } 828 ptr += (len + 3) >> 2; 829 } 830 831 smc_options_write(ptr, &options); 832 833 mptcp_options_write(th, ptr, tp, opts); 834 } 835 836 static void smc_set_option(struct tcp_sock *tp, 837 struct tcp_out_options *opts, 838 unsigned int *remaining) 839 { 840 #if IS_ENABLED(CONFIG_SMC) 841 if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc) { 842 tp->syn_smc = !!smc_call_hsbpf(1, tp, syn_option); 843 /* re-check syn_smc */ 844 if (tp->syn_smc && 845 *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { 846 opts->options |= OPTION_SMC; 847 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; 848 } 849 } 850 #endif 851 } 852 853 static void smc_set_option_cond(const struct tcp_sock *tp, 854 struct inet_request_sock *ireq, 855 struct tcp_out_options *opts, 856 unsigned int *remaining) 857 { 858 #if IS_ENABLED(CONFIG_SMC) 859 if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc && ireq->smc_ok) { 860 ireq->smc_ok = !!smc_call_hsbpf(1, tp, synack_option, ireq); 861 /* re-check smc_ok */ 862 if (ireq->smc_ok && 863 *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) { 864 opts->options |= OPTION_SMC; 865 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; 866 } 867 } 868 #endif 869 } 870 871 static void mptcp_set_option_cond(const struct request_sock *req, 872 struct tcp_out_options *opts, 873 unsigned int *remaining) 874 { 875 if (rsk_is_mptcp(req)) { 876 unsigned int size; 877 878 if (mptcp_synack_options(req, &size, &opts->mptcp)) { 879 if (*remaining >= size) { 880 opts->options |= OPTION_MPTCP; 881 *remaining -= size; 882 } 883 } 884 } 885 } 886 887 static u32 tcp_synack_options_combine_saving(struct tcp_out_options *opts) 888 { 889 /* How much there's room for combining with the alignment padding? */ 890 if ((opts->options & (OPTION_SACK_ADVERTISE | OPTION_TS)) == 891 OPTION_SACK_ADVERTISE) 892 return 2; 893 else if (opts->options & OPTION_WSCALE) 894 return 1; 895 return 0; 896 } 897 898 /* Calculates how long AccECN option will fit to @remaining option space. 899 * 900 * AccECN option can sometimes replace NOPs used for alignment of other 901 * TCP options (up to @max_combine_saving available). 902 * 903 * Only solutions with at least @required AccECN fields are accepted. 904 * 905 * Returns: The size of the AccECN option excluding space repurposed from 906 * the alignment of the other options. 907 */ 908 static int tcp_options_fit_accecn(struct tcp_out_options *opts, int required, 909 int remaining) 910 { 911 int size = TCP_ACCECN_MAXSIZE; 912 int sack_blocks_reduce = 0; 913 int max_combine_saving; 914 int rem = remaining; 915 int align_size; 916 917 if (opts->use_synack_ecn_bytes) 918 max_combine_saving = tcp_synack_options_combine_saving(opts); 919 else 920 max_combine_saving = opts->num_sack_blocks > 0 ? 2 : 0; 921 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; 922 while (opts->num_accecn_fields >= required) { 923 /* Pad to dword if cannot combine */ 924 if ((size & 0x3) > max_combine_saving) 925 align_size = ALIGN(size, 4); 926 else 927 align_size = ALIGN_DOWN(size, 4); 928 929 if (rem >= align_size) { 930 size = align_size; 931 break; 932 } else if (opts->num_accecn_fields == required && 933 opts->num_sack_blocks > 2 && 934 required > 0) { 935 /* Try to fit the option by removing one SACK block */ 936 opts->num_sack_blocks--; 937 sack_blocks_reduce++; 938 rem = rem + TCPOLEN_SACK_PERBLOCK; 939 940 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; 941 size = TCP_ACCECN_MAXSIZE; 942 continue; 943 } 944 945 opts->num_accecn_fields--; 946 size -= TCPOLEN_ACCECN_PERFIELD; 947 } 948 if (sack_blocks_reduce > 0) { 949 if (opts->num_accecn_fields >= required) 950 size -= sack_blocks_reduce * TCPOLEN_SACK_PERBLOCK; 951 else 952 opts->num_sack_blocks += sack_blocks_reduce; 953 } 954 if (opts->num_accecn_fields < required) 955 return 0; 956 957 opts->options |= OPTION_ACCECN; 958 return size; 959 } 960 961 /* Compute TCP options for SYN packets. This is not the final 962 * network wire format yet. 963 */ 964 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, 965 struct tcp_out_options *opts, 966 struct tcp_key *key) 967 { 968 struct tcp_sock *tp = tcp_sk(sk); 969 unsigned int remaining = MAX_TCP_OPTION_SPACE; 970 struct tcp_fastopen_request *fastopen = tp->fastopen_req; 971 bool timestamps; 972 973 opts->options = 0; 974 975 /* Better than switch (key.type) as it has static branches */ 976 if (tcp_key_is_md5(key)) { 977 timestamps = false; 978 opts->options |= OPTION_MD5; 979 remaining -= TCPOLEN_MD5SIG_ALIGNED; 980 } else { 981 timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps); 982 if (tcp_key_is_ao(key)) { 983 opts->options |= OPTION_AO; 984 remaining -= tcp_ao_len_aligned(key->ao_key); 985 } 986 } 987 988 /* We always get an MSS option. The option bytes which will be seen in 989 * normal data packets should timestamps be used, must be in the MSS 990 * advertised. But we subtract them from tp->mss_cache so that 991 * calculations in tcp_sendmsg are simpler etc. So account for this 992 * fact here if necessary. If we don't do this correctly, as a 993 * receiver we won't recognize data packets as being full sized when we 994 * should, and thus we won't abide by the delayed ACK rules correctly. 995 * SACKs don't matter, we never delay an ACK when we have any of those 996 * going out. */ 997 opts->mss = tcp_advertise_mss(sk); 998 remaining -= TCPOLEN_MSS_ALIGNED; 999 1000 if (likely(timestamps)) { 1001 opts->options |= OPTION_TS; 1002 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; 1003 opts->tsecr = tp->rx_opt.ts_recent; 1004 remaining -= TCPOLEN_TSTAMP_ALIGNED; 1005 } 1006 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { 1007 opts->ws = tp->rx_opt.rcv_wscale; 1008 opts->options |= OPTION_WSCALE; 1009 remaining -= TCPOLEN_WSCALE_ALIGNED; 1010 } 1011 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { 1012 opts->options |= OPTION_SACK_ADVERTISE; 1013 if (unlikely(!(OPTION_TS & opts->options))) 1014 remaining -= TCPOLEN_SACKPERM_ALIGNED; 1015 } 1016 1017 if (fastopen && fastopen->cookie.len >= 0) { 1018 u32 need = fastopen->cookie.len; 1019 1020 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : 1021 TCPOLEN_FASTOPEN_BASE; 1022 need = (need + 3) & ~3U; /* Align to 32 bits */ 1023 if (remaining >= need) { 1024 opts->options |= OPTION_FAST_OPEN_COOKIE; 1025 opts->fastopen_cookie = &fastopen->cookie; 1026 remaining -= need; 1027 tp->syn_fastopen = 1; 1028 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; 1029 } 1030 } 1031 1032 smc_set_option(tp, opts, &remaining); 1033 1034 if (sk_is_mptcp(sk)) { 1035 unsigned int size; 1036 1037 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { 1038 if (remaining >= size) { 1039 opts->options |= OPTION_MPTCP; 1040 remaining -= size; 1041 } 1042 } 1043 } 1044 1045 /* Simultaneous open SYN/ACK needs AccECN option but not SYN. 1046 * It is attempted to negotiate the use of AccECN also on the first 1047 * retransmitted SYN, as mentioned in "3.1.4.1. Retransmitted SYNs" 1048 * of AccECN draft. 1049 */ 1050 if (unlikely((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) && 1051 tcp_ecn_mode_accecn(tp) && 1052 inet_csk(sk)->icsk_retransmits < 2 && 1053 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && 1054 remaining >= TCPOLEN_ACCECN_BASE)) { 1055 opts->use_synack_ecn_bytes = 1; 1056 remaining -= tcp_options_fit_accecn(opts, 0, remaining); 1057 } 1058 1059 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); 1060 1061 return MAX_TCP_OPTION_SPACE - remaining; 1062 } 1063 1064 /* Set up TCP options for SYN-ACKs. */ 1065 static unsigned int tcp_synack_options(const struct sock *sk, 1066 struct request_sock *req, 1067 unsigned int mss, struct sk_buff *skb, 1068 struct tcp_out_options *opts, 1069 const struct tcp_key *key, 1070 struct tcp_fastopen_cookie *foc, 1071 enum tcp_synack_type synack_type, 1072 struct sk_buff *syn_skb) 1073 { 1074 struct inet_request_sock *ireq = inet_rsk(req); 1075 unsigned int remaining = MAX_TCP_OPTION_SPACE; 1076 struct tcp_request_sock *treq = tcp_rsk(req); 1077 1078 if (tcp_key_is_md5(key)) { 1079 opts->options |= OPTION_MD5; 1080 remaining -= TCPOLEN_MD5SIG_ALIGNED; 1081 1082 /* We can't fit any SACK blocks in a packet with MD5 + TS 1083 * options. There was discussion about disabling SACK 1084 * rather than TS in order to fit in better with old, 1085 * buggy kernels, but that was deemed to be unnecessary. 1086 */ 1087 if (synack_type != TCP_SYNACK_COOKIE) 1088 ireq->tstamp_ok &= !ireq->sack_ok; 1089 } else if (tcp_key_is_ao(key)) { 1090 opts->options |= OPTION_AO; 1091 remaining -= tcp_ao_len_aligned(key->ao_key); 1092 ireq->tstamp_ok &= !ireq->sack_ok; 1093 } 1094 1095 /* We always send an MSS option. */ 1096 opts->mss = mss; 1097 remaining -= TCPOLEN_MSS_ALIGNED; 1098 1099 if (likely(ireq->wscale_ok)) { 1100 opts->ws = ireq->rcv_wscale; 1101 opts->options |= OPTION_WSCALE; 1102 remaining -= TCPOLEN_WSCALE_ALIGNED; 1103 } 1104 if (likely(ireq->tstamp_ok)) { 1105 opts->options |= OPTION_TS; 1106 opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + 1107 tcp_rsk(req)->ts_off; 1108 if (!tcp_rsk(req)->snt_tsval_first) { 1109 if (!opts->tsval) 1110 opts->tsval = ~0U; 1111 tcp_rsk(req)->snt_tsval_first = opts->tsval; 1112 } 1113 WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval); 1114 opts->tsecr = req->ts_recent; 1115 remaining -= TCPOLEN_TSTAMP_ALIGNED; 1116 } 1117 if (likely(ireq->sack_ok)) { 1118 opts->options |= OPTION_SACK_ADVERTISE; 1119 if (unlikely(!ireq->tstamp_ok)) 1120 remaining -= TCPOLEN_SACKPERM_ALIGNED; 1121 } 1122 if (foc != NULL && foc->len >= 0) { 1123 u32 need = foc->len; 1124 1125 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : 1126 TCPOLEN_FASTOPEN_BASE; 1127 need = (need + 3) & ~3U; /* Align to 32 bits */ 1128 if (remaining >= need) { 1129 opts->options |= OPTION_FAST_OPEN_COOKIE; 1130 opts->fastopen_cookie = foc; 1131 remaining -= need; 1132 } 1133 } 1134 1135 mptcp_set_option_cond(req, opts, &remaining); 1136 1137 smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); 1138 1139 if (treq->accecn_ok && 1140 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && 1141 synack_type != TCP_SYNACK_RETRANS && remaining >= TCPOLEN_ACCECN_BASE) { 1142 opts->use_synack_ecn_bytes = 1; 1143 remaining -= tcp_options_fit_accecn(opts, 0, remaining); 1144 } 1145 1146 bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb, 1147 synack_type, opts, &remaining); 1148 1149 return MAX_TCP_OPTION_SPACE - remaining; 1150 } 1151 1152 /* Compute TCP options for ESTABLISHED sockets. This is not the 1153 * final wire format yet. 1154 */ 1155 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, 1156 struct tcp_out_options *opts, 1157 struct tcp_key *key) 1158 { 1159 struct tcp_sock *tp = tcp_sk(sk); 1160 unsigned int size = 0; 1161 unsigned int eff_sacks; 1162 1163 opts->options = 0; 1164 1165 /* Better than switch (key.type) as it has static branches */ 1166 if (tcp_key_is_md5(key)) { 1167 opts->options |= OPTION_MD5; 1168 size += TCPOLEN_MD5SIG_ALIGNED; 1169 } else if (tcp_key_is_ao(key)) { 1170 opts->options |= OPTION_AO; 1171 size += tcp_ao_len_aligned(key->ao_key); 1172 } 1173 1174 if (likely(tp->rx_opt.tstamp_ok)) { 1175 opts->options |= OPTION_TS; 1176 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + 1177 tp->tsoffset : 0; 1178 opts->tsecr = tp->rx_opt.ts_recent; 1179 size += TCPOLEN_TSTAMP_ALIGNED; 1180 } 1181 1182 /* MPTCP options have precedence over SACK for the limited TCP 1183 * option space because a MPTCP connection would be forced to 1184 * fall back to regular TCP if a required multipath option is 1185 * missing. SACK still gets a chance to use whatever space is 1186 * left. 1187 */ 1188 if (sk_is_mptcp(sk)) { 1189 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 1190 unsigned int opt_size = 0; 1191 1192 if (mptcp_established_options(sk, skb, &opt_size, remaining, 1193 &opts->mptcp)) { 1194 opts->options |= OPTION_MPTCP; 1195 size += opt_size; 1196 } 1197 } 1198 1199 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 1200 if (unlikely(eff_sacks)) { 1201 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 1202 if (likely(remaining >= TCPOLEN_SACK_BASE_ALIGNED + 1203 TCPOLEN_SACK_PERBLOCK)) { 1204 opts->num_sack_blocks = 1205 min_t(unsigned int, eff_sacks, 1206 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 1207 TCPOLEN_SACK_PERBLOCK); 1208 1209 size += TCPOLEN_SACK_BASE_ALIGNED + 1210 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 1211 } else { 1212 opts->num_sack_blocks = 0; 1213 } 1214 } else { 1215 opts->num_sack_blocks = 0; 1216 } 1217 1218 if (tcp_ecn_mode_accecn(tp)) { 1219 int ecn_opt = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option); 1220 1221 if (ecn_opt && tp->saw_accecn_opt && 1222 (ecn_opt >= TCP_ACCECN_OPTION_PERSIST || 1223 !tcp_accecn_opt_fail_send(tp)) && 1224 (ecn_opt >= TCP_ACCECN_OPTION_FULL || tp->accecn_opt_demand || 1225 tcp_accecn_option_beacon_check(sk))) { 1226 opts->use_synack_ecn_bytes = 0; 1227 size += tcp_options_fit_accecn(opts, tp->accecn_minlen, 1228 MAX_TCP_OPTION_SPACE - size); 1229 } 1230 } 1231 1232 if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp, 1233 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) { 1234 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; 1235 1236 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining); 1237 1238 size = MAX_TCP_OPTION_SPACE - remaining; 1239 } 1240 1241 return size; 1242 } 1243 1244 1245 /* TCP SMALL QUEUES (TSQ) 1246 * 1247 * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev) 1248 * to reduce RTT and bufferbloat. 1249 * We do this using a special skb destructor (tcp_wfree). 1250 * 1251 * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb 1252 * needs to be reallocated in a driver. 1253 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc 1254 * 1255 * Since transmit from skb destructor is forbidden, we use a BH work item 1256 * to process all sockets that eventually need to send more skbs. 1257 * We use one work item per cpu, with its own queue of sockets. 1258 */ 1259 struct tsq_work { 1260 struct work_struct work; 1261 struct list_head head; /* queue of tcp sockets */ 1262 }; 1263 static DEFINE_PER_CPU(struct tsq_work, tsq_work); 1264 1265 static void tcp_tsq_write(struct sock *sk) 1266 { 1267 if ((1 << sk->sk_state) & 1268 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | 1269 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) { 1270 struct tcp_sock *tp = tcp_sk(sk); 1271 1272 if (tp->lost_out > tp->retrans_out && 1273 tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) { 1274 tcp_mstamp_refresh(tp); 1275 tcp_xmit_retransmit_queue(sk); 1276 } 1277 1278 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, 1279 0, GFP_ATOMIC); 1280 } 1281 } 1282 1283 static void tcp_tsq_handler(struct sock *sk) 1284 { 1285 bh_lock_sock(sk); 1286 if (!sock_owned_by_user(sk)) 1287 tcp_tsq_write(sk); 1288 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) 1289 sock_hold(sk); 1290 bh_unlock_sock(sk); 1291 } 1292 /* 1293 * One work item per cpu tries to send more skbs. 1294 * We run in BH context but need to disable irqs when 1295 * transferring tsq->head because tcp_wfree() might 1296 * interrupt us (non NAPI drivers) 1297 */ 1298 static void tcp_tsq_workfn(struct work_struct *work) 1299 { 1300 struct tsq_work *tsq = container_of(work, struct tsq_work, work); 1301 LIST_HEAD(list); 1302 unsigned long flags; 1303 struct list_head *q, *n; 1304 struct tcp_sock *tp; 1305 struct sock *sk; 1306 1307 local_irq_save(flags); 1308 list_splice_init(&tsq->head, &list); 1309 local_irq_restore(flags); 1310 1311 list_for_each_safe(q, n, &list) { 1312 tp = list_entry(q, struct tcp_sock, tsq_node); 1313 list_del(&tp->tsq_node); 1314 1315 sk = (struct sock *)tp; 1316 smp_mb__before_atomic(); 1317 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); 1318 1319 tcp_tsq_handler(sk); 1320 sk_free(sk); 1321 } 1322 } 1323 1324 /** 1325 * tcp_release_cb - tcp release_sock() callback 1326 * @sk: socket 1327 * 1328 * called from release_sock() to perform protocol dependent 1329 * actions before socket release. 1330 */ 1331 void tcp_release_cb(struct sock *sk) 1332 { 1333 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); 1334 unsigned long nflags; 1335 1336 /* perform an atomic operation only if at least one flag is set */ 1337 do { 1338 if (!(flags & TCP_DEFERRED_ALL)) 1339 return; 1340 nflags = flags & ~TCP_DEFERRED_ALL; 1341 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); 1342 1343 if (flags & TCPF_TSQ_DEFERRED) { 1344 tcp_tsq_write(sk); 1345 __sock_put(sk); 1346 } 1347 1348 if (flags & TCPF_WRITE_TIMER_DEFERRED) { 1349 tcp_write_timer_handler(sk); 1350 __sock_put(sk); 1351 } 1352 if (flags & TCPF_DELACK_TIMER_DEFERRED) { 1353 tcp_delack_timer_handler(sk); 1354 __sock_put(sk); 1355 } 1356 if (flags & TCPF_MTU_REDUCED_DEFERRED) { 1357 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); 1358 __sock_put(sk); 1359 } 1360 if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk)) 1361 tcp_send_ack(sk); 1362 } 1363 1364 void __init tcp_tsq_work_init(void) 1365 { 1366 int i; 1367 1368 for_each_possible_cpu(i) { 1369 struct tsq_work *tsq = &per_cpu(tsq_work, i); 1370 1371 INIT_LIST_HEAD(&tsq->head); 1372 INIT_WORK(&tsq->work, tcp_tsq_workfn); 1373 } 1374 } 1375 1376 /* 1377 * Write buffer destructor automatically called from kfree_skb. 1378 * We can't xmit new skbs from this context, as we might already 1379 * hold qdisc lock. 1380 */ 1381 void tcp_wfree(struct sk_buff *skb) 1382 { 1383 struct sock *sk = skb->sk; 1384 struct tcp_sock *tp = tcp_sk(sk); 1385 unsigned long flags, nval, oval; 1386 struct tsq_work *tsq; 1387 bool empty; 1388 1389 /* Keep one reference on sk_wmem_alloc. 1390 * Will be released by sk_free() from here or tcp_tsq_workfn() 1391 */ 1392 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); 1393 1394 /* If this softirq is serviced by ksoftirqd, we are likely under stress. 1395 * Wait until our queues (qdisc + devices) are drained. 1396 * This gives : 1397 * - less callbacks to tcp_write_xmit(), reducing stress (batches) 1398 * - chance for incoming ACK (processed by another cpu maybe) 1399 * to migrate this flow (skb->ooo_okay will be eventually set) 1400 */ 1401 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) 1402 goto out; 1403 1404 oval = smp_load_acquire(&sk->sk_tsq_flags); 1405 do { 1406 if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED)) 1407 goto out; 1408 1409 nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED; 1410 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); 1411 1412 /* queue this socket to BH workqueue */ 1413 local_irq_save(flags); 1414 tsq = this_cpu_ptr(&tsq_work); 1415 empty = list_empty(&tsq->head); 1416 list_add(&tp->tsq_node, &tsq->head); 1417 if (empty) 1418 queue_work(system_bh_wq, &tsq->work); 1419 local_irq_restore(flags); 1420 return; 1421 out: 1422 sk_free(sk); 1423 } 1424 1425 /* Note: Called under soft irq. 1426 * We can call TCP stack right away, unless socket is owned by user. 1427 */ 1428 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) 1429 { 1430 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); 1431 struct sock *sk = (struct sock *)tp; 1432 1433 tcp_tsq_handler(sk); 1434 sock_put(sk); 1435 1436 return HRTIMER_NORESTART; 1437 } 1438 1439 static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, 1440 u64 prior_wstamp) 1441 { 1442 struct tcp_sock *tp = tcp_sk(sk); 1443 1444 if (sk->sk_pacing_status != SK_PACING_NONE) { 1445 unsigned long rate = READ_ONCE(sk->sk_pacing_rate); 1446 1447 /* Original sch_fq does not pace first 10 MSS 1448 * Note that tp->data_segs_out overflows after 2^32 packets, 1449 * this is a minor annoyance. 1450 */ 1451 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { 1452 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); 1453 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; 1454 1455 /* take into account OS jitter */ 1456 len_ns -= min_t(u64, len_ns / 2, credit); 1457 tp->tcp_wstamp_ns += len_ns; 1458 } 1459 } 1460 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 1461 } 1462 1463 /* Snapshot the current delivery information in the skb, to generate 1464 * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered(). 1465 */ 1466 static void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb) 1467 { 1468 struct tcp_sock *tp = tcp_sk(sk); 1469 1470 /* In general we need to start delivery rate samples from the 1471 * time we received the most recent ACK, to ensure we include 1472 * the full time the network needs to deliver all in-flight 1473 * packets. If there are no packets in flight yet, then we 1474 * know that any ACKs after now indicate that the network was 1475 * able to deliver those packets completely in the sampling 1476 * interval between now and the next ACK. 1477 * 1478 * Note that we use packets_out instead of tcp_packets_in_flight(tp) 1479 * because the latter is a guess based on RTO and loss-marking 1480 * heuristics. We don't want spurious RTOs or loss markings to cause 1481 * a spuriously small time interval, causing a spuriously high 1482 * bandwidth estimate. 1483 */ 1484 if (!tp->packets_out) { 1485 u64 tstamp_us = tcp_skb_timestamp_us(skb); 1486 1487 tp->first_tx_mstamp = tstamp_us; 1488 tp->delivered_mstamp = tstamp_us; 1489 } 1490 1491 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; 1492 TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp; 1493 TCP_SKB_CB(skb)->tx.delivered = tp->delivered; 1494 TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce; 1495 TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0; 1496 } 1497 1498 INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 1499 INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); 1500 1501 /* This routine computes an IPv4 TCP checksum. */ 1502 static void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) 1503 { 1504 const struct inet_sock *inet = inet_sk(sk); 1505 1506 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); 1507 } 1508 1509 #if IS_ENABLED(CONFIG_IPV6) 1510 #include <net/ip6_checksum.h> 1511 1512 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) 1513 { 1514 __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); 1515 } 1516 #endif 1517 1518 /* This routine actually transmits TCP packets queued in by 1519 * tcp_do_sendmsg(). This is used by both the initial 1520 * transmission and possible later retransmissions. 1521 * All SKB's seen here are completely headerless. It is our 1522 * job to build the TCP header, and pass the packet down to 1523 * IP so it can do the same plus pass the packet off to the 1524 * device. 1525 * 1526 * We are working here with either a clone of the original 1527 * SKB, or a fresh unique copy made by the retransmit engine. 1528 */ 1529 static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, 1530 int clone_it, gfp_t gfp_mask, u32 rcv_nxt) 1531 { 1532 const struct inet_connection_sock *icsk = inet_csk(sk); 1533 struct inet_sock *inet; 1534 struct tcp_sock *tp; 1535 struct tcp_skb_cb *tcb; 1536 struct tcp_out_options opts; 1537 unsigned int tcp_options_size, tcp_header_size; 1538 struct sk_buff *oskb = NULL; 1539 struct tcp_key key; 1540 struct tcphdr *th; 1541 u64 prior_wstamp; 1542 int err; 1543 1544 BUG_ON(!skb || !tcp_skb_pcount(skb)); 1545 tp = tcp_sk(sk); 1546 prior_wstamp = tp->tcp_wstamp_ns; 1547 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); 1548 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); 1549 if (clone_it) { 1550 oskb = skb; 1551 1552 tcp_skb_tsorted_save(oskb) { 1553 if (unlikely(skb_cloned(oskb))) 1554 skb = pskb_copy(oskb, gfp_mask); 1555 else 1556 skb = skb_clone(oskb, gfp_mask); 1557 } tcp_skb_tsorted_restore(oskb); 1558 1559 if (unlikely(!skb)) 1560 return -ENOBUFS; 1561 /* retransmit skbs might have a non zero value in skb->dev 1562 * because skb->dev is aliased with skb->rbnode.rb_left 1563 */ 1564 skb->dev = NULL; 1565 } 1566 1567 inet = inet_sk(sk); 1568 tcb = TCP_SKB_CB(skb); 1569 memset(&opts.cleared, 0, sizeof(opts.cleared)); 1570 1571 tcp_get_current_key(sk, &key); 1572 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { 1573 tcp_options_size = tcp_syn_options(sk, skb, &opts, &key); 1574 } else { 1575 tcp_options_size = tcp_established_options(sk, skb, &opts, &key); 1576 /* Force a PSH flag on all (GSO) packets to expedite GRO flush 1577 * at receiver : This slightly improve GRO performance. 1578 * Note that we do not force the PSH flag for non GSO packets, 1579 * because they might be sent under high congestion events, 1580 * and in this case it is better to delay the delivery of 1-MSS 1581 * packets and thus the corresponding ACK packet that would 1582 * release the following packet. 1583 */ 1584 if (tcp_skb_pcount(skb) > 1) 1585 tcb->tcp_flags |= TCPHDR_PSH; 1586 } 1587 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 1588 1589 /* We set skb->ooo_okay to one if this packet can select 1590 * a different TX queue than prior packets of this flow, 1591 * to avoid self inflicted reorders. 1592 * The 'other' queue decision is based on current cpu number 1593 * if XPS is enabled, or sk->sk_txhash otherwise. 1594 * We can switch to another (and better) queue if: 1595 * 1) No packet with payload is in qdisc/device queues. 1596 * Delays in TX completion can defeat the test 1597 * even if packets were already sent. 1598 * 2) Or rtx queue is empty. 1599 * This mitigates above case if ACK packets for 1600 * all prior packets were already processed. 1601 */ 1602 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || 1603 tcp_rtx_queue_empty(sk); 1604 1605 /* If we had to use memory reserve to allocate this skb, 1606 * this might cause drops if packet is looped back : 1607 * Other socket might not have SOCK_MEMALLOC. 1608 * Packets not looped back do not care about pfmemalloc. 1609 */ 1610 skb->pfmemalloc = 0; 1611 1612 __skb_push(skb, tcp_header_size); 1613 skb_reset_transport_header(skb); 1614 1615 skb_orphan(skb); 1616 skb->sk = sk; 1617 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; 1618 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 1619 1620 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); 1621 1622 /* Build TCP header and checksum it. */ 1623 th = (struct tcphdr *)skb->data; 1624 th->source = inet->inet_sport; 1625 th->dest = inet->inet_dport; 1626 th->seq = htonl(tcb->seq); 1627 th->ack_seq = htonl(rcv_nxt); 1628 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 1629 (tcb->tcp_flags & TCPHDR_FLAGS_MASK)); 1630 1631 th->check = 0; 1632 th->urg_ptr = 0; 1633 1634 /* The urg_mode check is necessary during a below snd_una win probe */ 1635 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 1636 if (before(tp->snd_up, tcb->seq + 0x10000)) { 1637 th->urg_ptr = htons(tp->snd_up - tcb->seq); 1638 th->urg = 1; 1639 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 1640 th->urg_ptr = htons(0xFFFF); 1641 th->urg = 1; 1642 } 1643 } 1644 1645 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 1646 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { 1647 th->window = htons(tcp_select_window(sk)); 1648 tcp_ecn_send(sk, skb, th, tcp_header_size); 1649 } else { 1650 /* RFC1323: The window in SYN & SYN/ACK segments 1651 * is never scaled. 1652 */ 1653 th->window = htons(min(tp->rcv_wnd, 65535U)); 1654 } 1655 1656 tcp_options_write(th, tp, NULL, &opts, &key); 1657 1658 if (tcp_key_is_md5(&key)) { 1659 #ifdef CONFIG_TCP_MD5SIG 1660 /* Calculate the MD5 hash, as we have all we need now */ 1661 sk_gso_disable(sk); 1662 tp->af_specific->calc_md5_hash(opts.hash_location, 1663 key.md5_key, sk, skb); 1664 #endif 1665 } else if (tcp_key_is_ao(&key)) { 1666 int err; 1667 1668 err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th, 1669 opts.hash_location); 1670 if (err) { 1671 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_NOT_SPECIFIED); 1672 return -ENOMEM; 1673 } 1674 } 1675 1676 /* BPF prog is the last one writing header option */ 1677 bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts); 1678 1679 #if IS_ENABLED(CONFIG_IPV6) 1680 if (likely(icsk->icsk_af_ops->net_header_len == sizeof(struct ipv6hdr))) 1681 tcp_v6_send_check(sk, skb); 1682 else 1683 #endif 1684 tcp_v4_send_check(sk, skb); 1685 1686 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1687 tcp_event_ack_sent(sk, rcv_nxt); 1688 1689 if (skb->len != tcp_header_size) { 1690 tcp_event_data_sent(tp, sk); 1691 WRITE_ONCE(tp->data_segs_out, 1692 tp->data_segs_out + tcp_skb_pcount(skb)); 1693 WRITE_ONCE(tp->bytes_sent, 1694 tp->bytes_sent + skb->len - tcp_header_size); 1695 } 1696 1697 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 1698 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 1699 tcp_skb_pcount(skb)); 1700 1701 tp->segs_out += tcp_skb_pcount(skb); 1702 skb_set_hash_from_sk(skb, sk); 1703 /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ 1704 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); 1705 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); 1706 1707 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ 1708 1709 /* Cleanup our debris for IP stacks */ 1710 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), 1711 sizeof(struct inet6_skb_parm))); 1712 1713 tcp_add_tx_delay(skb, tp); 1714 1715 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, 1716 inet6_csk_xmit, ip_queue_xmit, 1717 sk, skb, &inet->cork.fl); 1718 1719 if (unlikely(err > 0)) { 1720 tcp_enter_cwr(sk); 1721 err = net_xmit_eval(err); 1722 } 1723 if (!err && oskb) { 1724 tcp_update_skb_after_send(sk, oskb, prior_wstamp); 1725 tcp_rate_skb_sent(sk, oskb); 1726 } 1727 return err; 1728 } 1729 1730 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 1731 gfp_t gfp_mask) 1732 { 1733 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, 1734 tcp_sk(sk)->rcv_nxt); 1735 } 1736 1737 /* This routine just queues the buffer for sending. 1738 * 1739 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 1740 * otherwise socket can stall. 1741 */ 1742 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 1743 { 1744 struct tcp_sock *tp = tcp_sk(sk); 1745 1746 /* Advance write_seq and place onto the write_queue. */ 1747 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); 1748 __skb_header_release(skb); 1749 psp_enqueue_set_decrypted(sk, skb); 1750 tcp_add_write_queue_tail(sk, skb); 1751 sk_wmem_queued_add(sk, skb->truesize); 1752 sk_mem_charge(sk, skb->truesize); 1753 } 1754 1755 /* Initialize TSO segments for a packet. */ 1756 static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now) 1757 { 1758 int tso_segs; 1759 1760 if (skb->len <= mss_now) { 1761 /* Avoid the costly divide in the normal 1762 * non-TSO case. 1763 */ 1764 TCP_SKB_CB(skb)->tcp_gso_size = 0; 1765 tcp_skb_pcount_set(skb, 1); 1766 return 1; 1767 } 1768 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; 1769 tso_segs = DIV_ROUND_UP(skb->len, mss_now); 1770 tcp_skb_pcount_set(skb, tso_segs); 1771 return tso_segs; 1772 } 1773 1774 /* Pcount in the middle of the write queue got changed, we need to do various 1775 * tweaks to fix counters 1776 */ 1777 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 1778 { 1779 struct tcp_sock *tp = tcp_sk(sk); 1780 1781 tp->packets_out -= decr; 1782 1783 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1784 tp->sacked_out -= decr; 1785 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1786 tp->retrans_out -= decr; 1787 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 1788 tp->lost_out -= decr; 1789 1790 /* Reno case is special. Sigh... */ 1791 if (tcp_is_reno(tp) && decr > 0) 1792 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 1793 1794 tcp_verify_left_out(tp); 1795 } 1796 1797 static bool tcp_has_tx_tstamp(const struct sk_buff *skb) 1798 { 1799 return TCP_SKB_CB(skb)->txstamp_ack || 1800 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); 1801 } 1802 1803 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) 1804 { 1805 struct skb_shared_info *shinfo = skb_shinfo(skb); 1806 1807 if (unlikely(tcp_has_tx_tstamp(skb)) && 1808 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { 1809 struct skb_shared_info *shinfo2 = skb_shinfo(skb2); 1810 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; 1811 1812 shinfo->tx_flags &= ~tsflags; 1813 shinfo2->tx_flags |= tsflags; 1814 swap(shinfo->tskey, shinfo2->tskey); 1815 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; 1816 TCP_SKB_CB(skb)->txstamp_ack = 0; 1817 } 1818 } 1819 1820 static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2) 1821 { 1822 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; 1823 TCP_SKB_CB(skb)->eor = 0; 1824 } 1825 1826 /* Insert buff after skb on the write or rtx queue of sk. */ 1827 static void tcp_insert_write_queue_after(struct sk_buff *skb, 1828 struct sk_buff *buff, 1829 struct sock *sk, 1830 enum tcp_queue tcp_queue) 1831 { 1832 if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE) 1833 __skb_queue_after(&sk->sk_write_queue, skb, buff); 1834 else 1835 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); 1836 } 1837 1838 /* Function to create two new TCP segments. Shrinks the given segment 1839 * to the specified size and appends a new segment with the rest of the 1840 * packet to the list. This won't be called frequently, I hope. 1841 * Remember, these are still headerless SKBs at this point. 1842 */ 1843 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, 1844 struct sk_buff *skb, u32 len, 1845 unsigned int mss_now, gfp_t gfp) 1846 { 1847 struct tcp_sock *tp = tcp_sk(sk); 1848 struct sk_buff *buff; 1849 int old_factor; 1850 long limit; 1851 u16 flags; 1852 int nlen; 1853 1854 if (WARN_ON(len > skb->len)) 1855 return -EINVAL; 1856 1857 DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); 1858 1859 /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. 1860 * We need some allowance to not penalize applications setting small 1861 * SO_SNDBUF values. 1862 * Also allow first and last skb in retransmit queue to be split. 1863 */ 1864 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); 1865 if (unlikely((sk->sk_wmem_queued >> 1) > limit && 1866 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE && 1867 skb != tcp_rtx_queue_head(sk) && 1868 skb != tcp_rtx_queue_tail(sk))) { 1869 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG); 1870 return -ENOMEM; 1871 } 1872 1873 if (skb_unclone_keeptruesize(skb, gfp)) 1874 return -ENOMEM; 1875 1876 /* Get a new skb... force flag on. */ 1877 buff = tcp_stream_alloc_skb(sk, gfp, true); 1878 if (!buff) 1879 return -ENOMEM; /* We'll just try again later. */ 1880 skb_copy_decrypted(buff, skb); 1881 mptcp_skb_ext_copy(buff, skb); 1882 1883 sk_wmem_queued_add(sk, buff->truesize); 1884 sk_mem_charge(sk, buff->truesize); 1885 nlen = skb->len - len; 1886 buff->truesize += nlen; 1887 skb->truesize -= nlen; 1888 1889 /* Correct the sequence numbers. */ 1890 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1891 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1892 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1893 1894 /* PSH and FIN should only be set in the second packet. */ 1895 flags = TCP_SKB_CB(skb)->tcp_flags; 1896 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 1897 TCP_SKB_CB(buff)->tcp_flags = flags; 1898 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1899 tcp_skb_fragment_eor(skb, buff); 1900 1901 skb_split(skb, buff, len); 1902 1903 skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC); 1904 tcp_fragment_tstamp(skb, buff); 1905 1906 old_factor = tcp_skb_pcount(skb); 1907 1908 /* Fix up tso_factor for both original and new SKB. */ 1909 tcp_set_skb_tso_segs(skb, mss_now); 1910 tcp_set_skb_tso_segs(buff, mss_now); 1911 1912 /* Update delivered info for the new segment */ 1913 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; 1914 1915 /* If this packet has been sent out already, we must 1916 * adjust the various packet counters. 1917 */ 1918 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 1919 int diff = old_factor - tcp_skb_pcount(skb) - 1920 tcp_skb_pcount(buff); 1921 1922 if (diff) 1923 tcp_adjust_pcount(sk, skb, diff); 1924 } 1925 1926 /* Link BUFF into the send queue. */ 1927 __skb_header_release(buff); 1928 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue); 1929 if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE) 1930 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); 1931 1932 return 0; 1933 } 1934 1935 /* This is similar to __pskb_pull_tail(). The difference is that pulled 1936 * data is not copied, but immediately discarded. 1937 */ 1938 static int __pskb_trim_head(struct sk_buff *skb, int len) 1939 { 1940 struct skb_shared_info *shinfo; 1941 int i, k, eat; 1942 1943 DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); 1944 eat = len; 1945 k = 0; 1946 shinfo = skb_shinfo(skb); 1947 for (i = 0; i < shinfo->nr_frags; i++) { 1948 int size = skb_frag_size(&shinfo->frags[i]); 1949 1950 if (size <= eat) { 1951 skb_frag_unref(skb, i); 1952 eat -= size; 1953 } else { 1954 shinfo->frags[k] = shinfo->frags[i]; 1955 if (eat) { 1956 skb_frag_off_add(&shinfo->frags[k], eat); 1957 skb_frag_size_sub(&shinfo->frags[k], eat); 1958 eat = 0; 1959 } 1960 k++; 1961 } 1962 } 1963 shinfo->nr_frags = k; 1964 1965 skb->data_len -= len; 1966 skb->len = skb->data_len; 1967 return len; 1968 } 1969 1970 /* Remove acked data from a packet in the transmit queue. */ 1971 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 1972 { 1973 u32 delta_truesize; 1974 1975 if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) 1976 return -ENOMEM; 1977 1978 delta_truesize = __pskb_trim_head(skb, len); 1979 1980 TCP_SKB_CB(skb)->seq += len; 1981 1982 skb->truesize -= delta_truesize; 1983 sk_wmem_queued_add(sk, -delta_truesize); 1984 if (!skb_zcopy_pure(skb)) 1985 sk_mem_uncharge(sk, delta_truesize); 1986 1987 /* Any change of skb->len requires recalculation of tso factor. */ 1988 if (tcp_skb_pcount(skb) > 1) 1989 tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb)); 1990 1991 return 0; 1992 } 1993 1994 /* Calculate MSS not accounting any TCP options. */ 1995 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu) 1996 { 1997 const struct tcp_sock *tp = tcp_sk(sk); 1998 const struct inet_connection_sock *icsk = inet_csk(sk); 1999 int mss_now; 2000 2001 /* Calculate base mss without TCP options: 2002 It is MMS_S - sizeof(tcphdr) of rfc1122 2003 */ 2004 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 2005 2006 /* Clamp it (mss_clamp does not include tcp options) */ 2007 if (mss_now > tp->rx_opt.mss_clamp) 2008 mss_now = tp->rx_opt.mss_clamp; 2009 2010 /* Now subtract optional transport overhead */ 2011 mss_now -= icsk->icsk_ext_hdr_len; 2012 2013 /* Then reserve room for full set of TCP options and 8 bytes of data */ 2014 mss_now = max(mss_now, 2015 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); 2016 return mss_now; 2017 } 2018 2019 /* Calculate MSS. Not accounting for SACKs here. */ 2020 int tcp_mtu_to_mss(struct sock *sk, int pmtu) 2021 { 2022 /* Subtract TCP options size, not including SACKs */ 2023 return __tcp_mtu_to_mss(sk, pmtu) - 2024 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); 2025 } 2026 2027 /* Inverse of above */ 2028 int tcp_mss_to_mtu(struct sock *sk, int mss) 2029 { 2030 const struct tcp_sock *tp = tcp_sk(sk); 2031 const struct inet_connection_sock *icsk = inet_csk(sk); 2032 2033 return mss + 2034 tp->tcp_header_len + 2035 icsk->icsk_ext_hdr_len + 2036 icsk->icsk_af_ops->net_header_len; 2037 } 2038 EXPORT_SYMBOL(tcp_mss_to_mtu); 2039 2040 /* MTU probing init per socket */ 2041 void tcp_mtup_init(struct sock *sk) 2042 { 2043 struct tcp_sock *tp = tcp_sk(sk); 2044 struct inet_connection_sock *icsk = inet_csk(sk); 2045 struct net *net = sock_net(sk); 2046 2047 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; 2048 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 2049 icsk->icsk_af_ops->net_header_len; 2050 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); 2051 icsk->icsk_mtup.probe_size = 0; 2052 if (icsk->icsk_mtup.enabled) 2053 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 2054 } 2055 2056 /* This function synchronize snd mss to current pmtu/exthdr set. 2057 2058 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts 2059 for TCP options, but includes only bare TCP header. 2060 2061 tp->rx_opt.mss_clamp is mss negotiated at connection setup. 2062 It is minimum of user_mss and mss received with SYN. 2063 It also does not include TCP options. 2064 2065 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 2066 2067 tp->mss_cache is current effective sending mss, including 2068 all tcp options except for SACKs. It is evaluated, 2069 taking into account current pmtu, but never exceeds 2070 tp->rx_opt.mss_clamp. 2071 2072 NOTE1. rfc1122 clearly states that advertised MSS 2073 DOES NOT include either tcp or ip options. 2074 2075 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 2076 are READ ONLY outside this function. --ANK (980731) 2077 */ 2078 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 2079 { 2080 struct tcp_sock *tp = tcp_sk(sk); 2081 struct inet_connection_sock *icsk = inet_csk(sk); 2082 int mss_now; 2083 2084 if (icsk->icsk_mtup.search_high > pmtu) 2085 icsk->icsk_mtup.search_high = pmtu; 2086 2087 mss_now = tcp_mtu_to_mss(sk, pmtu); 2088 mss_now = tcp_bound_to_half_wnd(tp, mss_now); 2089 2090 /* And store cached results */ 2091 icsk->icsk_pmtu_cookie = pmtu; 2092 if (icsk->icsk_mtup.enabled) 2093 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 2094 tp->mss_cache = mss_now; 2095 2096 return mss_now; 2097 } 2098 2099 /* Compute the current effective MSS, taking SACKs and IP options, 2100 * and even PMTU discovery events into account. 2101 */ 2102 unsigned int tcp_current_mss(struct sock *sk) 2103 { 2104 const struct tcp_sock *tp = tcp_sk(sk); 2105 const struct dst_entry *dst = __sk_dst_get(sk); 2106 u32 mss_now; 2107 unsigned int header_len; 2108 struct tcp_out_options opts; 2109 struct tcp_key key; 2110 2111 mss_now = tp->mss_cache; 2112 2113 if (dst) { 2114 u32 mtu = dst_mtu(dst); 2115 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 2116 mss_now = tcp_sync_mss(sk, mtu); 2117 } 2118 tcp_get_current_key(sk, &key); 2119 header_len = tcp_established_options(sk, NULL, &opts, &key) + 2120 sizeof(struct tcphdr); 2121 /* The mss_cache is sized based on tp->tcp_header_len, which assumes 2122 * some common options. If this is an odd packet (because we have SACK 2123 * blocks etc) then our calculated header_len will be different, and 2124 * we have to adjust mss_now correspondingly */ 2125 if (header_len != tp->tcp_header_len) { 2126 int delta = (int) header_len - tp->tcp_header_len; 2127 mss_now -= delta; 2128 } 2129 2130 return mss_now; 2131 } 2132 2133 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 2134 * As additional protections, we do not touch cwnd in retransmission phases, 2135 * and if application hit its sndbuf limit recently. 2136 */ 2137 static void tcp_cwnd_application_limited(struct sock *sk) 2138 { 2139 struct tcp_sock *tp = tcp_sk(sk); 2140 2141 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 2142 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 2143 /* Limited by application or receiver window. */ 2144 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 2145 u32 win_used = max(tp->snd_cwnd_used, init_win); 2146 if (win_used < tcp_snd_cwnd(tp)) { 2147 WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk)); 2148 tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); 2149 } 2150 tp->snd_cwnd_used = 0; 2151 } 2152 tp->snd_cwnd_stamp = tcp_jiffies32; 2153 } 2154 2155 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) 2156 { 2157 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 2158 struct tcp_sock *tp = tcp_sk(sk); 2159 2160 /* Track the strongest available signal of the degree to which the cwnd 2161 * is fully utilized. If cwnd-limited then remember that fact for the 2162 * current window. If not cwnd-limited then track the maximum number of 2163 * outstanding packets in the current window. (If cwnd-limited then we 2164 * chose to not update tp->max_packets_out to avoid an extra else 2165 * clause with no functional impact.) 2166 */ 2167 if (!before(tp->snd_una, tp->cwnd_usage_seq) || 2168 is_cwnd_limited || 2169 (!tp->is_cwnd_limited && 2170 tp->packets_out > tp->max_packets_out)) { 2171 tp->is_cwnd_limited = is_cwnd_limited; 2172 tp->max_packets_out = tp->packets_out; 2173 tp->cwnd_usage_seq = tp->snd_nxt; 2174 } 2175 2176 if (tcp_is_cwnd_limited(sk)) { 2177 /* Network is feed fully. */ 2178 tp->snd_cwnd_used = 0; 2179 tp->snd_cwnd_stamp = tcp_jiffies32; 2180 } else { 2181 /* Network starves. */ 2182 if (tp->packets_out > tp->snd_cwnd_used) 2183 tp->snd_cwnd_used = tp->packets_out; 2184 2185 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && 2186 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && 2187 !ca_ops->cong_control) 2188 tcp_cwnd_application_limited(sk); 2189 2190 /* The following conditions together indicate the starvation 2191 * is caused by insufficient sender buffer: 2192 * 1) just sent some data (see tcp_write_xmit) 2193 * 2) not cwnd limited (this else condition) 2194 * 3) no more data to send (tcp_write_queue_empty()) 2195 * 4) application is hitting buffer limit (SOCK_NOSPACE) 2196 */ 2197 if (tcp_write_queue_empty(sk) && sk->sk_socket && 2198 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && 2199 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) 2200 tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); 2201 } 2202 } 2203 2204 /* Minshall's variant of the Nagle send check. */ 2205 static bool tcp_minshall_check(const struct tcp_sock *tp) 2206 { 2207 return after(tp->snd_sml, tp->snd_una) && 2208 !after(tp->snd_sml, tp->snd_nxt); 2209 } 2210 2211 /* Update snd_sml if this skb is under mss 2212 * Note that a TSO packet might end with a sub-mss segment 2213 * The test is really : 2214 * if ((skb->len % mss) != 0) 2215 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 2216 * But we can avoid doing the divide again given we already have 2217 * skb_pcount = skb->len / mss_now 2218 */ 2219 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, 2220 const struct sk_buff *skb) 2221 { 2222 if (skb->len < tcp_skb_pcount(skb) * mss_now) 2223 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; 2224 } 2225 2226 /* Return false, if packet can be sent now without violation Nagle's rules: 2227 * 1. It is full sized. (provided by caller in %partial bool) 2228 * 2. Or it contains FIN. (already checked by caller) 2229 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set. 2230 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 2231 * With Minshall's modification: all sent small packets are ACKed. 2232 */ 2233 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, 2234 int nonagle) 2235 { 2236 return partial && 2237 ((nonagle & TCP_NAGLE_CORK) || 2238 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); 2239 } 2240 2241 /* Return how many segs we'd like on a TSO packet, 2242 * depending on current pacing rate, and how close the peer is. 2243 * 2244 * Rationale is: 2245 * - For close peers, we rather send bigger packets to reduce 2246 * cpu costs, because occasional losses will be repaired fast. 2247 * - For long distance/rtt flows, we would like to get ACK clocking 2248 * with 1 ACK per ms. 2249 * 2250 * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting 2251 * in bigger TSO bursts. We we cut the RTT-based allowance in half 2252 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance 2253 * is below 1500 bytes after 6 * ~500 usec = 3ms. 2254 */ 2255 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, 2256 int min_tso_segs) 2257 { 2258 unsigned long bytes; 2259 u32 r; 2260 2261 bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); 2262 2263 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); 2264 if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) 2265 bytes += sk->sk_gso_max_size >> r; 2266 2267 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); 2268 2269 return max_t(u32, bytes / mss_now, min_tso_segs); 2270 } 2271 2272 /* Return the number of segments we want in the skb we are transmitting. 2273 * See if congestion control module wants to decide; otherwise, autosize. 2274 */ 2275 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) 2276 { 2277 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 2278 u32 min_tso, tso_segs; 2279 2280 min_tso = ca_ops->min_tso_segs ? 2281 ca_ops->min_tso_segs(sk) : 2282 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); 2283 2284 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso); 2285 return min_t(u32, tso_segs, sk->sk_gso_max_segs); 2286 } 2287 2288 /* Returns the portion of skb which can be sent right away */ 2289 static unsigned int tcp_mss_split_point(const struct sock *sk, 2290 const struct sk_buff *skb, 2291 unsigned int mss_now, 2292 unsigned int max_segs, 2293 int nonagle) 2294 { 2295 const struct tcp_sock *tp = tcp_sk(sk); 2296 u32 partial, needed, window, max_len; 2297 2298 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2299 max_len = mss_now * max_segs; 2300 2301 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) 2302 return max_len; 2303 2304 needed = min(skb->len, window); 2305 2306 if (max_len <= needed) 2307 return max_len; 2308 2309 partial = needed % mss_now; 2310 /* If last segment is not a full MSS, check if Nagle rules allow us 2311 * to include this last segment in this skb. 2312 * Otherwise, we'll split the skb at last MSS boundary 2313 */ 2314 if (tcp_nagle_check(partial != 0, tp, nonagle)) 2315 return needed - partial; 2316 2317 return needed; 2318 } 2319 2320 /* Can at least one segment of SKB be sent right now, according to the 2321 * congestion window rules? If so, return how many segments are allowed. 2322 */ 2323 static u32 tcp_cwnd_test(const struct tcp_sock *tp) 2324 { 2325 u32 in_flight, cwnd, halfcwnd; 2326 2327 in_flight = tcp_packets_in_flight(tp); 2328 cwnd = tcp_snd_cwnd(tp); 2329 if (in_flight >= cwnd) 2330 return 0; 2331 2332 /* For better scheduling, ensure we have at least 2333 * 2 GSO packets in flight. 2334 */ 2335 halfcwnd = max(cwnd >> 1, 1U); 2336 return min(halfcwnd, cwnd - in_flight); 2337 } 2338 2339 /* Initialize TSO state of a skb. 2340 * This must be invoked the first time we consider transmitting 2341 * SKB onto the wire. 2342 */ 2343 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now) 2344 { 2345 int tso_segs = tcp_skb_pcount(skb); 2346 2347 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) 2348 return tcp_set_skb_tso_segs(skb, mss_now); 2349 2350 return tso_segs; 2351 } 2352 2353 2354 /* Return true if the Nagle test allows this packet to be 2355 * sent now. 2356 */ 2357 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 2358 unsigned int cur_mss, int nonagle) 2359 { 2360 /* Nagle rule does not apply to frames, which sit in the middle of the 2361 * write_queue (they have no chances to get new data). 2362 * 2363 * This is implemented in the callers, where they modify the 'nonagle' 2364 * argument based upon the location of SKB in the send queue. 2365 */ 2366 if (nonagle & TCP_NAGLE_PUSH) 2367 return true; 2368 2369 /* Don't use the nagle rule for urgent data (or for the final FIN). */ 2370 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) 2371 return true; 2372 2373 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) 2374 return true; 2375 2376 return false; 2377 } 2378 2379 /* Does at least the first segment of SKB fit into the send window? */ 2380 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, 2381 const struct sk_buff *skb, 2382 unsigned int cur_mss) 2383 { 2384 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 2385 2386 if (skb->len > cur_mss) 2387 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 2388 2389 return !after(end_seq, tcp_wnd_end(tp)); 2390 } 2391 2392 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 2393 * which is put after SKB on the list. It is very much like 2394 * tcp_fragment() except that it may make several kinds of assumptions 2395 * in order to speed up the splitting operation. In particular, we 2396 * know that all the data is in scatter-gather pages, and that the 2397 * packet has never been sent out before (and thus is not cloned). 2398 */ 2399 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 2400 unsigned int mss_now, gfp_t gfp) 2401 { 2402 int nlen = skb->len - len; 2403 struct sk_buff *buff; 2404 u16 flags; 2405 2406 /* All of a TSO frame must be composed of paged data. */ 2407 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); 2408 2409 buff = tcp_stream_alloc_skb(sk, gfp, true); 2410 if (unlikely(!buff)) 2411 return -ENOMEM; 2412 skb_copy_decrypted(buff, skb); 2413 mptcp_skb_ext_copy(buff, skb); 2414 2415 sk_wmem_queued_add(sk, buff->truesize); 2416 sk_mem_charge(sk, buff->truesize); 2417 buff->truesize += nlen; 2418 skb->truesize -= nlen; 2419 2420 /* Correct the sequence numbers. */ 2421 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 2422 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 2423 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 2424 2425 /* PSH and FIN should only be set in the second packet. */ 2426 flags = TCP_SKB_CB(skb)->tcp_flags; 2427 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); 2428 TCP_SKB_CB(buff)->tcp_flags = flags; 2429 2430 tcp_skb_fragment_eor(skb, buff); 2431 2432 skb_split(skb, buff, len); 2433 tcp_fragment_tstamp(skb, buff); 2434 2435 /* Fix up tso_factor for both original and new SKB. */ 2436 tcp_set_skb_tso_segs(skb, mss_now); 2437 tcp_set_skb_tso_segs(buff, mss_now); 2438 2439 /* Link BUFF into the send queue. */ 2440 __skb_header_release(buff); 2441 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE); 2442 2443 return 0; 2444 } 2445 2446 /* Try to defer sending, if possible, in order to minimize the amount 2447 * of TSO splitting we do. View it as a kind of TSO Nagle test. 2448 * 2449 * This algorithm is from John Heffner. 2450 */ 2451 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, 2452 bool *is_cwnd_limited, 2453 bool *is_rwnd_limited, 2454 u32 max_segs) 2455 { 2456 const struct inet_connection_sock *icsk = inet_csk(sk); 2457 u32 send_win, cong_win, limit, in_flight, threshold; 2458 u64 srtt_in_ns, expected_ack, how_far_is_the_ack; 2459 struct tcp_sock *tp = tcp_sk(sk); 2460 struct sk_buff *head; 2461 int win_divisor; 2462 s64 delta; 2463 2464 if (icsk->icsk_ca_state >= TCP_CA_Recovery) 2465 goto send_now; 2466 2467 /* Avoid bursty behavior by allowing defer 2468 * only if the last write was recent (1 ms). 2469 * Note that tp->tcp_wstamp_ns can be in the future if we have 2470 * packets waiting in a qdisc or device for EDT delivery. 2471 */ 2472 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; 2473 if (delta > 0) 2474 goto send_now; 2475 2476 in_flight = tcp_packets_in_flight(tp); 2477 2478 BUG_ON(tcp_skb_pcount(skb) <= 1); 2479 BUG_ON(tcp_snd_cwnd(tp) <= in_flight); 2480 2481 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2482 2483 /* From in_flight test above, we know that cwnd > in_flight. */ 2484 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; 2485 2486 limit = min(send_win, cong_win); 2487 2488 /* If a full-sized TSO skb can be sent, do it. */ 2489 if (limit >= max_segs * tp->mss_cache) 2490 goto send_now; 2491 2492 /* Middle in queue won't get any more data, full sendable already? */ 2493 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 2494 goto send_now; 2495 2496 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); 2497 if (win_divisor) { 2498 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); 2499 2500 /* If at least some fraction of a window is available, 2501 * just use it. 2502 */ 2503 chunk /= win_divisor; 2504 if (limit >= chunk) 2505 goto send_now; 2506 } else { 2507 /* Different approach, try not to defer past a single 2508 * ACK. Receiver should ACK every other full sized 2509 * frame, so if we have space for more than 3 frames 2510 * then send now. 2511 */ 2512 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 2513 goto send_now; 2514 } 2515 2516 /* TODO : use tsorted_sent_queue ? */ 2517 head = tcp_rtx_queue_head(sk); 2518 if (!head) 2519 goto send_now; 2520 2521 srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us; 2522 /* When is the ACK expected ? */ 2523 expected_ack = head->tstamp + srtt_in_ns; 2524 /* How far from now is the ACK expected ? */ 2525 how_far_is_the_ack = expected_ack - tp->tcp_clock_cache; 2526 2527 /* If next ACK is likely to come too late, 2528 * ie in more than min(1ms, half srtt), do not defer. 2529 */ 2530 threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC); 2531 2532 if ((s64)(how_far_is_the_ack - threshold) > 0) 2533 goto send_now; 2534 2535 /* Ok, it looks like it is advisable to defer. 2536 * Three cases are tracked : 2537 * 1) We are cwnd-limited 2538 * 2) We are rwnd-limited 2539 * 3) We are application limited. 2540 */ 2541 if (cong_win < send_win) { 2542 if (cong_win <= skb->len) { 2543 *is_cwnd_limited = true; 2544 return true; 2545 } 2546 } else { 2547 if (send_win <= skb->len) { 2548 *is_rwnd_limited = true; 2549 return true; 2550 } 2551 } 2552 2553 /* If this packet won't get more data, do not wait. */ 2554 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || 2555 TCP_SKB_CB(skb)->eor) 2556 goto send_now; 2557 2558 return true; 2559 2560 send_now: 2561 return false; 2562 } 2563 2564 static inline void tcp_mtu_check_reprobe(struct sock *sk) 2565 { 2566 struct inet_connection_sock *icsk = inet_csk(sk); 2567 struct tcp_sock *tp = tcp_sk(sk); 2568 struct net *net = sock_net(sk); 2569 u32 interval; 2570 s32 delta; 2571 2572 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); 2573 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; 2574 if (unlikely(delta >= interval * HZ)) { 2575 int mss = tcp_current_mss(sk); 2576 2577 /* Update current search range */ 2578 icsk->icsk_mtup.probe_size = 0; 2579 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + 2580 sizeof(struct tcphdr) + 2581 icsk->icsk_af_ops->net_header_len; 2582 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 2583 2584 /* Update probe time stamp */ 2585 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; 2586 } 2587 } 2588 2589 static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) 2590 { 2591 struct sk_buff *skb, *next; 2592 2593 skb = tcp_send_head(sk); 2594 tcp_for_write_queue_from_safe(skb, next, sk) { 2595 if (len <= skb->len) 2596 break; 2597 2598 if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next)) 2599 return false; 2600 2601 len -= skb->len; 2602 } 2603 2604 return true; 2605 } 2606 2607 static int tcp_clone_payload(struct sock *sk, struct sk_buff *to, 2608 int probe_size) 2609 { 2610 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; 2611 int i, todo, len = 0, nr_frags = 0; 2612 const struct sk_buff *skb; 2613 2614 if (!sk_wmem_schedule(sk, to->truesize + probe_size)) 2615 return -ENOMEM; 2616 2617 skb_queue_walk(&sk->sk_write_queue, skb) { 2618 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; 2619 2620 if (skb_headlen(skb)) 2621 return -EINVAL; 2622 2623 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { 2624 if (len >= probe_size) 2625 goto commit; 2626 todo = min_t(int, skb_frag_size(fragfrom), 2627 probe_size - len); 2628 len += todo; 2629 if (lastfrag && 2630 skb_frag_page(fragfrom) == skb_frag_page(lastfrag) && 2631 skb_frag_off(fragfrom) == skb_frag_off(lastfrag) + 2632 skb_frag_size(lastfrag)) { 2633 skb_frag_size_add(lastfrag, todo); 2634 continue; 2635 } 2636 if (unlikely(nr_frags == MAX_SKB_FRAGS)) 2637 return -E2BIG; 2638 skb_frag_page_copy(fragto, fragfrom); 2639 skb_frag_off_copy(fragto, fragfrom); 2640 skb_frag_size_set(fragto, todo); 2641 nr_frags++; 2642 lastfrag = fragto++; 2643 } 2644 } 2645 commit: 2646 WARN_ON_ONCE(len != probe_size); 2647 for (i = 0; i < nr_frags; i++) 2648 skb_frag_ref(to, i); 2649 2650 skb_shinfo(to)->nr_frags = nr_frags; 2651 to->truesize += probe_size; 2652 to->len += probe_size; 2653 to->data_len += probe_size; 2654 __skb_header_release(to); 2655 return 0; 2656 } 2657 2658 /* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if 2659 * all its payload was moved to another one (dst). 2660 * Make sure to transfer tcp_flags, eor, and tstamp. 2661 */ 2662 static void tcp_eat_one_skb(struct sock *sk, 2663 struct sk_buff *dst, 2664 struct sk_buff *src) 2665 { 2666 TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags; 2667 TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor; 2668 tcp_skb_collapse_tstamp(dst, src); 2669 tcp_unlink_write_queue(src, sk); 2670 tcp_wmem_free_skb(sk, src); 2671 } 2672 2673 /* Create a new MTU probe if we are ready. 2674 * MTU probe is regularly attempting to increase the path MTU by 2675 * deliberately sending larger packets. This discovers routing 2676 * changes resulting in larger path MTUs. 2677 * 2678 * Returns 0 if we should wait to probe (no cwnd available), 2679 * 1 if a probe was sent, 2680 * -1 otherwise 2681 */ 2682 static int tcp_mtu_probe(struct sock *sk) 2683 { 2684 struct inet_connection_sock *icsk = inet_csk(sk); 2685 struct tcp_sock *tp = tcp_sk(sk); 2686 struct sk_buff *skb, *nskb, *next; 2687 struct net *net = sock_net(sk); 2688 int probe_size; 2689 int size_needed; 2690 int copy, len; 2691 int mss_now; 2692 int interval; 2693 2694 /* Not currently probing/verifying, 2695 * not in recovery, 2696 * have enough cwnd, and 2697 * not SACKing (the variable headers throw things off) 2698 */ 2699 if (likely(!icsk->icsk_mtup.enabled || 2700 icsk->icsk_mtup.probe_size || 2701 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 2702 tcp_snd_cwnd(tp) < 11 || 2703 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) 2704 return -1; 2705 2706 /* Use binary search for probe_size between tcp_mss_base, 2707 * and current mss_clamp. if (search_high - search_low) 2708 * smaller than a threshold, backoff from probing. 2709 */ 2710 mss_now = tcp_current_mss(sk); 2711 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + 2712 icsk->icsk_mtup.search_low) >> 1); 2713 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 2714 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; 2715 /* When misfortune happens, we are reprobing actively, 2716 * and then reprobe timer has expired. We stick with current 2717 * probing process by not resetting search range to its orignal. 2718 */ 2719 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || 2720 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { 2721 /* Check whether enough time has elaplased for 2722 * another round of probing. 2723 */ 2724 tcp_mtu_check_reprobe(sk); 2725 return -1; 2726 } 2727 2728 /* Have enough data in the send queue to probe? */ 2729 if (tp->write_seq - tp->snd_nxt < size_needed) 2730 return -1; 2731 2732 if (tp->snd_wnd < size_needed) 2733 return -1; 2734 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 2735 return 0; 2736 2737 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 2738 if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) { 2739 if (!tcp_packets_in_flight(tp)) 2740 return -1; 2741 else 2742 return 0; 2743 } 2744 2745 if (!tcp_can_coalesce_send_queue_head(sk, probe_size)) 2746 return -1; 2747 2748 /* We're allowed to probe. Build it now. */ 2749 nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false); 2750 if (!nskb) 2751 return -1; 2752 2753 /* build the payload, and be prepared to abort if this fails. */ 2754 if (tcp_clone_payload(sk, nskb, probe_size)) { 2755 tcp_skb_tsorted_anchor_cleanup(nskb); 2756 consume_skb(nskb); 2757 return -1; 2758 } 2759 sk_wmem_queued_add(sk, nskb->truesize); 2760 sk_mem_charge(sk, nskb->truesize); 2761 2762 skb = tcp_send_head(sk); 2763 skb_copy_decrypted(nskb, skb); 2764 mptcp_skb_ext_copy(nskb, skb); 2765 2766 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 2767 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 2768 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 2769 2770 tcp_insert_write_queue_before(nskb, skb, sk); 2771 tcp_highest_sack_replace(sk, skb, nskb); 2772 2773 len = 0; 2774 tcp_for_write_queue_from_safe(skb, next, sk) { 2775 copy = min_t(int, skb->len, probe_size - len); 2776 2777 if (skb->len <= copy) { 2778 tcp_eat_one_skb(sk, nskb, skb); 2779 } else { 2780 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 2781 ~(TCPHDR_FIN|TCPHDR_PSH); 2782 __pskb_trim_head(skb, copy); 2783 tcp_set_skb_tso_segs(skb, mss_now); 2784 TCP_SKB_CB(skb)->seq += copy; 2785 } 2786 2787 len += copy; 2788 2789 if (len >= probe_size) 2790 break; 2791 } 2792 tcp_init_tso_segs(nskb, nskb->len); 2793 2794 /* We're ready to send. If this fails, the probe will 2795 * be resegmented into mss-sized pieces by tcp_write_xmit(). 2796 */ 2797 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 2798 /* Decrement cwnd here because we are sending 2799 * effectively two packets. */ 2800 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); 2801 tcp_event_new_data_sent(sk, nskb); 2802 2803 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 2804 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 2805 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 2806 2807 return 1; 2808 } 2809 2810 return -1; 2811 } 2812 2813 static bool tcp_pacing_check(struct sock *sk) 2814 { 2815 struct tcp_sock *tp = tcp_sk(sk); 2816 2817 if (!tcp_needs_internal_pacing(sk)) 2818 return false; 2819 2820 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) 2821 return false; 2822 2823 if (!hrtimer_is_queued(&tp->pacing_timer)) { 2824 hrtimer_start(&tp->pacing_timer, 2825 ns_to_ktime(tp->tcp_wstamp_ns), 2826 HRTIMER_MODE_ABS_PINNED_SOFT); 2827 sock_hold(sk); 2828 } 2829 return true; 2830 } 2831 2832 static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk) 2833 { 2834 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; 2835 2836 /* No skb in the rtx queue. */ 2837 if (!node) 2838 return true; 2839 2840 /* Only one skb in rtx queue. */ 2841 return !node->rb_left && !node->rb_right; 2842 } 2843 2844 /* TCP Small Queues : 2845 * Control number of packets in qdisc/devices to two packets / or ~1 ms. 2846 * (These limits are doubled for retransmits) 2847 * This allows for : 2848 * - better RTT estimation and ACK scheduling 2849 * - faster recovery 2850 * - high rates 2851 * Alas, some drivers / subsystems require a fair amount 2852 * of queued bytes to ensure line rate. 2853 * One example is wifi aggregation (802.11 AMPDU) 2854 */ 2855 static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb, 2856 unsigned int factor) 2857 { 2858 unsigned long limit; 2859 2860 limit = max_t(unsigned long, 2861 2 * skb->truesize, 2862 READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); 2863 limit = min_t(unsigned long, limit, 2864 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); 2865 limit <<= factor; 2866 2867 if (static_branch_unlikely(&tcp_tx_delay_enabled) && 2868 tcp_sk(sk)->tcp_tx_delay) { 2869 u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * 2870 tcp_sk(sk)->tcp_tx_delay; 2871 2872 /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we 2873 * approximate our needs assuming an ~100% skb->truesize overhead. 2874 * USEC_PER_SEC is approximated by 2^20. 2875 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift. 2876 */ 2877 extra_bytes >>= (20 - 1); 2878 limit += extra_bytes; 2879 } 2880 if (refcount_read(&sk->sk_wmem_alloc) > limit) { 2881 /* Always send skb if rtx queue is empty or has one skb. 2882 * No need to wait for TX completion to call us back, 2883 * after softirq schedule. 2884 * This helps when TX completions are delayed too much. 2885 */ 2886 if (tcp_rtx_queue_empty_or_single_skb(sk)) 2887 return false; 2888 2889 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 2890 /* It is possible TX completion already happened 2891 * before we set TSQ_THROTTLED, so we must 2892 * test again the condition. 2893 */ 2894 smp_mb__after_atomic(); 2895 if (refcount_read(&sk->sk_wmem_alloc) > limit) 2896 return true; 2897 } 2898 return false; 2899 } 2900 2901 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type) 2902 { 2903 struct tcp_sock *tp = tcp_sk(sk); 2904 2905 2906 /* There are multiple conditions worthy of tracking in a 2907 * chronograph, so that the highest priority enum takes 2908 * precedence over the other conditions (see tcp_chrono_start). 2909 * If a condition stops, we only stop chrono tracking if 2910 * it's the "most interesting" or current chrono we are 2911 * tracking and starts busy chrono if we have pending data. 2912 */ 2913 if (tcp_rtx_and_write_queues_empty(sk)) 2914 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); 2915 else if (type == tp->chrono_type) 2916 tcp_chrono_set(tp, TCP_CHRONO_BUSY); 2917 } 2918 2919 /* First skb in the write queue is smaller than ideal packet size. 2920 * Check if we can move payload from the second skb in the queue. 2921 */ 2922 static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount) 2923 { 2924 struct sk_buff *next_skb = skb->next; 2925 unsigned int nlen; 2926 2927 if (tcp_skb_is_last(sk, skb)) 2928 return; 2929 2930 if (!tcp_skb_can_collapse(skb, next_skb)) 2931 return; 2932 2933 nlen = min_t(u32, amount, next_skb->len); 2934 if (!nlen || !skb_shift(skb, next_skb, nlen)) 2935 return; 2936 2937 TCP_SKB_CB(skb)->end_seq += nlen; 2938 TCP_SKB_CB(next_skb)->seq += nlen; 2939 2940 if (!next_skb->len) { 2941 /* In case FIN is set, we need to update end_seq */ 2942 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 2943 2944 tcp_eat_one_skb(sk, skb, next_skb); 2945 } 2946 } 2947 2948 /* This routine writes packets to the network. It advances the 2949 * send_head. This happens as incoming acks open up the remote 2950 * window for us. 2951 * 2952 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 2953 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 2954 * account rare use of URG, this is not a big flaw. 2955 * 2956 * Send at most one packet when push_one > 0. Temporarily ignore 2957 * cwnd limit to force at most one packet out when push_one == 2. 2958 2959 * Returns true, if no segments are in flight and we have queued segments, 2960 * but cannot send anything now because of SWS or another problem. 2961 */ 2962 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 2963 int push_one, gfp_t gfp) 2964 { 2965 struct tcp_sock *tp = tcp_sk(sk); 2966 struct sk_buff *skb; 2967 unsigned int tso_segs, sent_pkts; 2968 u32 cwnd_quota, max_segs; 2969 int result; 2970 bool is_cwnd_limited = false, is_rwnd_limited = false; 2971 2972 sent_pkts = 0; 2973 2974 tcp_mstamp_refresh(tp); 2975 2976 /* AccECN option beacon depends on mstamp, it may change mss */ 2977 if (tcp_ecn_mode_accecn(tp) && tcp_accecn_option_beacon_check(sk)) 2978 mss_now = tcp_current_mss(sk); 2979 2980 if (!push_one) { 2981 /* Do MTU probing. */ 2982 result = tcp_mtu_probe(sk); 2983 if (!result) { 2984 return false; 2985 } else if (result > 0) { 2986 sent_pkts = 1; 2987 } 2988 } 2989 2990 max_segs = tcp_tso_segs(sk, mss_now); 2991 while ((skb = tcp_send_head(sk))) { 2992 unsigned int limit; 2993 int missing_bytes; 2994 2995 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { 2996 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ 2997 tp->tcp_wstamp_ns = tp->tcp_clock_cache; 2998 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); 2999 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); 3000 tcp_init_tso_segs(skb, mss_now); 3001 goto repair; /* Skip network transmission */ 3002 } 3003 3004 if (tcp_pacing_check(sk)) 3005 break; 3006 3007 cwnd_quota = tcp_cwnd_test(tp); 3008 if (!cwnd_quota) { 3009 if (push_one == 2) 3010 /* Force out a loss probe pkt. */ 3011 cwnd_quota = 1; 3012 else 3013 break; 3014 } 3015 cwnd_quota = min(cwnd_quota, max_segs); 3016 missing_bytes = cwnd_quota * mss_now - skb->len; 3017 if (missing_bytes > 0) 3018 tcp_grow_skb(sk, skb, missing_bytes); 3019 3020 tso_segs = tcp_set_skb_tso_segs(skb, mss_now); 3021 3022 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { 3023 is_rwnd_limited = true; 3024 break; 3025 } 3026 3027 if (tso_segs == 1) { 3028 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 3029 (tcp_skb_is_last(sk, skb) ? 3030 nonagle : TCP_NAGLE_PUSH)))) 3031 break; 3032 } else { 3033 if (!push_one && 3034 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 3035 &is_rwnd_limited, max_segs)) 3036 break; 3037 } 3038 3039 limit = mss_now; 3040 if (tso_segs > 1 && !tcp_urg_mode(tp)) 3041 limit = tcp_mss_split_point(sk, skb, mss_now, 3042 cwnd_quota, 3043 nonagle); 3044 3045 if (skb->len > limit && 3046 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 3047 break; 3048 3049 if (tcp_small_queue_check(sk, skb, 0)) 3050 break; 3051 3052 /* Argh, we hit an empty skb(), presumably a thread 3053 * is sleeping in sendmsg()/sk_stream_wait_memory(). 3054 * We do not want to send a pure-ack packet and have 3055 * a strange looking rtx queue with empty packet(s). 3056 */ 3057 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) 3058 break; 3059 3060 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 3061 break; 3062 3063 repair: 3064 /* Advance the send_head. This one is sent out. 3065 * This call will increment packets_out. 3066 */ 3067 tcp_event_new_data_sent(sk, skb); 3068 3069 tcp_minshall_update(tp, mss_now, skb); 3070 sent_pkts += tcp_skb_pcount(skb); 3071 3072 if (push_one) 3073 break; 3074 } 3075 3076 if (is_rwnd_limited) 3077 tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED); 3078 else 3079 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 3080 3081 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); 3082 if (likely(sent_pkts || is_cwnd_limited)) 3083 tcp_cwnd_validate(sk, is_cwnd_limited); 3084 3085 if (likely(sent_pkts)) { 3086 if (tcp_in_cwnd_reduction(sk)) 3087 tp->prr_out += sent_pkts; 3088 3089 /* Send one loss probe per tail loss episode. */ 3090 if (push_one != 2) 3091 tcp_schedule_loss_probe(sk, false); 3092 return false; 3093 } 3094 return !tp->packets_out && !tcp_write_queue_empty(sk); 3095 } 3096 3097 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) 3098 { 3099 struct inet_connection_sock *icsk = inet_csk(sk); 3100 struct tcp_sock *tp = tcp_sk(sk); 3101 u32 timeout, timeout_us, rto_delta_us; 3102 int early_retrans; 3103 3104 /* Don't do any loss probe on a Fast Open connection before 3WHS 3105 * finishes. 3106 */ 3107 if (rcu_access_pointer(tp->fastopen_rsk)) 3108 return false; 3109 3110 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); 3111 /* Schedule a loss probe in 2*RTT for SACK capable connections 3112 * not in loss recovery, that are either limited by cwnd or application. 3113 */ 3114 if ((early_retrans != 3 && early_retrans != 4) || 3115 !tcp_is_sack(tp) || 3116 (icsk->icsk_ca_state != TCP_CA_Open && 3117 icsk->icsk_ca_state != TCP_CA_CWR)) 3118 return false; 3119 3120 /* Probe timeout is 2*rtt. Add minimum RTO to account 3121 * for delayed ack when there's one outstanding packet. If no RTT 3122 * sample is available then probe after TCP_TIMEOUT_INIT. 3123 */ 3124 if (tp->srtt_us) { 3125 timeout_us = tp->srtt_us >> 2; 3126 if (tp->packets_out == 1) 3127 timeout_us += tcp_rto_min_us(sk); 3128 else 3129 timeout_us += TCP_TIMEOUT_MIN_US; 3130 timeout = usecs_to_jiffies(timeout_us); 3131 } else { 3132 timeout = TCP_TIMEOUT_INIT; 3133 } 3134 3135 /* If the RTO formula yields an earlier time, then use that time. */ 3136 rto_delta_us = advancing_rto ? 3137 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : 3138 tcp_rto_delta_us(sk); /* How far in future is RTO? */ 3139 if (rto_delta_us > 0) 3140 timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); 3141 3142 tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, true); 3143 return true; 3144 } 3145 3146 /* Thanks to skb fast clones, we can detect if a prior transmit of 3147 * a packet is still in a qdisc or driver queue. 3148 * In this case, there is very little point doing a retransmit ! 3149 */ 3150 static bool skb_still_in_host_queue(struct sock *sk, 3151 const struct sk_buff *skb) 3152 { 3153 if (unlikely(skb_fclone_busy(sk, skb))) { 3154 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 3155 smp_mb__after_atomic(); 3156 if (skb_fclone_busy(sk, skb)) { 3157 NET_INC_STATS(sock_net(sk), 3158 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 3159 return true; 3160 } 3161 } 3162 return false; 3163 } 3164 3165 /* When probe timeout (PTO) fires, try send a new segment if possible, else 3166 * retransmit the last segment. 3167 */ 3168 void tcp_send_loss_probe(struct sock *sk) 3169 { 3170 struct tcp_sock *tp = tcp_sk(sk); 3171 struct sk_buff *skb; 3172 int pcount; 3173 int mss = tcp_current_mss(sk); 3174 3175 /* At most one outstanding TLP */ 3176 if (tp->tlp_high_seq) 3177 goto rearm_timer; 3178 3179 tp->tlp_retrans = 0; 3180 skb = tcp_send_head(sk); 3181 if (skb && tcp_snd_wnd_test(tp, skb, mss)) { 3182 pcount = tp->packets_out; 3183 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 3184 if (tp->packets_out > pcount) 3185 goto probe_sent; 3186 goto rearm_timer; 3187 } 3188 skb = skb_rb_last(&sk->tcp_rtx_queue); 3189 if (unlikely(!skb)) { 3190 tcp_warn_once(sk, tp->packets_out, "invalid inflight: "); 3191 smp_store_release(&inet_csk(sk)->icsk_pending, 0); 3192 return; 3193 } 3194 3195 if (skb_still_in_host_queue(sk, skb)) 3196 goto rearm_timer; 3197 3198 pcount = tcp_skb_pcount(skb); 3199 if (WARN_ON(!pcount)) 3200 goto rearm_timer; 3201 3202 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 3203 if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, 3204 (pcount - 1) * mss, mss, 3205 GFP_ATOMIC))) 3206 goto rearm_timer; 3207 skb = skb_rb_next(skb); 3208 } 3209 3210 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 3211 goto rearm_timer; 3212 3213 if (__tcp_retransmit_skb(sk, skb, 1)) 3214 goto rearm_timer; 3215 3216 tp->tlp_retrans = 1; 3217 3218 probe_sent: 3219 /* Record snd_nxt for loss detection. */ 3220 tp->tlp_high_seq = tp->snd_nxt; 3221 3222 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); 3223 /* Reset s.t. tcp_rearm_rto will restart timer from now */ 3224 smp_store_release(&inet_csk(sk)->icsk_pending, 0); 3225 rearm_timer: 3226 tcp_rearm_rto(sk); 3227 } 3228 3229 /* Push out any pending frames which were held back due to 3230 * TCP_CORK or attempt at coalescing tiny packets. 3231 * The socket must be locked by the caller. 3232 */ 3233 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 3234 int nonagle) 3235 { 3236 /* If we are closed, the bytes will have to remain here. 3237 * In time closedown will finish, we empty the write queue and 3238 * all will be happy. 3239 */ 3240 if (unlikely(sk->sk_state == TCP_CLOSE)) 3241 return; 3242 3243 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 3244 sk_gfp_mask(sk, GFP_ATOMIC))) 3245 tcp_check_probe_timer(sk); 3246 } 3247 3248 /* Send _single_ skb sitting at the send head. This function requires 3249 * true push pending frames to setup probe timer etc. 3250 */ 3251 void tcp_push_one(struct sock *sk, unsigned int mss_now) 3252 { 3253 struct sk_buff *skb = tcp_send_head(sk); 3254 3255 BUG_ON(!skb || skb->len < mss_now); 3256 3257 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 3258 } 3259 3260 /* This function returns the amount that we can raise the 3261 * usable window based on the following constraints 3262 * 3263 * 1. The window can never be shrunk once it is offered (RFC 793) 3264 * 2. We limit memory per socket 3265 * 3266 * RFC 1122: 3267 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 3268 * RECV.NEXT + RCV.WIN fixed until: 3269 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 3270 * 3271 * i.e. don't raise the right edge of the window until you can raise 3272 * it at least MSS bytes. 3273 * 3274 * Unfortunately, the recommended algorithm breaks header prediction, 3275 * since header prediction assumes th->window stays fixed. 3276 * 3277 * Strictly speaking, keeping th->window fixed violates the receiver 3278 * side SWS prevention criteria. The problem is that under this rule 3279 * a stream of single byte packets will cause the right side of the 3280 * window to always advance by a single byte. 3281 * 3282 * Of course, if the sender implements sender side SWS prevention 3283 * then this will not be a problem. 3284 * 3285 * BSD seems to make the following compromise: 3286 * 3287 * If the free space is less than the 1/4 of the maximum 3288 * space available and the free space is less than 1/2 mss, 3289 * then set the window to 0. 3290 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 3291 * Otherwise, just prevent the window from shrinking 3292 * and from being larger than the largest representable value. 3293 * 3294 * This prevents incremental opening of the window in the regime 3295 * where TCP is limited by the speed of the reader side taking 3296 * data out of the TCP receive queue. It does nothing about 3297 * those cases where the window is constrained on the sender side 3298 * because the pipeline is full. 3299 * 3300 * BSD also seems to "accidentally" limit itself to windows that are a 3301 * multiple of MSS, at least until the free space gets quite small. 3302 * This would appear to be a side effect of the mbuf implementation. 3303 * Combining these two algorithms results in the observed behavior 3304 * of having a fixed window size at almost all times. 3305 * 3306 * Below we obtain similar behavior by forcing the offered window to 3307 * a multiple of the mss when it is feasible to do so. 3308 * 3309 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 3310 * Regular options like TIMESTAMP are taken into account. 3311 */ 3312 u32 __tcp_select_window(struct sock *sk) 3313 { 3314 struct inet_connection_sock *icsk = inet_csk(sk); 3315 struct tcp_sock *tp = tcp_sk(sk); 3316 struct net *net = sock_net(sk); 3317 /* MSS for the peer's data. Previous versions used mss_clamp 3318 * here. I don't know if the value based on our guesses 3319 * of peer's MSS is better for the performance. It's more correct 3320 * but may be worse for the performance because of rcv_mss 3321 * fluctuations. --SAW 1998/11/1 3322 */ 3323 int mss = icsk->icsk_ack.rcv_mss; 3324 int free_space = tcp_space(sk); 3325 int allowed_space = tcp_full_space(sk); 3326 int full_space, window; 3327 3328 if (sk_is_mptcp(sk)) 3329 mptcp_space(sk, &free_space, &allowed_space); 3330 3331 full_space = min_t(int, tp->window_clamp, allowed_space); 3332 3333 if (unlikely(mss > full_space)) { 3334 mss = full_space; 3335 if (mss <= 0) 3336 return 0; 3337 } 3338 3339 /* Only allow window shrink if the sysctl is enabled and we have 3340 * a non-zero scaling factor in effect. 3341 */ 3342 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) 3343 goto shrink_window_allowed; 3344 3345 /* do not allow window to shrink */ 3346 3347 if (free_space < (full_space >> 1)) { 3348 icsk->icsk_ack.quick = 0; 3349 3350 if (tcp_under_memory_pressure(sk)) 3351 tcp_adjust_rcv_ssthresh(sk); 3352 3353 /* free_space might become our new window, make sure we don't 3354 * increase it due to wscale. 3355 */ 3356 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); 3357 3358 /* if free space is less than mss estimate, or is below 1/16th 3359 * of the maximum allowed, try to move to zero-window, else 3360 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and 3361 * new incoming data is dropped due to memory limits. 3362 * With large window, mss test triggers way too late in order 3363 * to announce zero window in time before rmem limit kicks in. 3364 */ 3365 if (free_space < (allowed_space >> 4) || free_space < mss) 3366 return 0; 3367 } 3368 3369 if (free_space > tp->rcv_ssthresh) 3370 free_space = tp->rcv_ssthresh; 3371 3372 /* Don't do rounding if we are using window scaling, since the 3373 * scaled window will not line up with the MSS boundary anyway. 3374 */ 3375 if (tp->rx_opt.rcv_wscale) { 3376 window = free_space; 3377 3378 /* Advertise enough space so that it won't get scaled away. 3379 * Import case: prevent zero window announcement if 3380 * 1<<rcv_wscale > mss. 3381 */ 3382 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); 3383 } else { 3384 window = tp->rcv_wnd; 3385 /* Get the largest window that is a nice multiple of mss. 3386 * Window clamp already applied above. 3387 * If our current window offering is within 1 mss of the 3388 * free space we just keep it. This prevents the divide 3389 * and multiply from happening most of the time. 3390 * We also don't do any window rounding when the free space 3391 * is too small. 3392 */ 3393 if (window <= free_space - mss || window > free_space) 3394 window = rounddown(free_space, mss); 3395 else if (mss == full_space && 3396 free_space > window + (full_space >> 1)) 3397 window = free_space; 3398 } 3399 3400 return window; 3401 3402 shrink_window_allowed: 3403 /* new window should always be an exact multiple of scaling factor */ 3404 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); 3405 3406 if (free_space < (full_space >> 1)) { 3407 icsk->icsk_ack.quick = 0; 3408 3409 if (tcp_under_memory_pressure(sk)) 3410 tcp_adjust_rcv_ssthresh(sk); 3411 3412 /* if free space is too low, return a zero window */ 3413 if (free_space < (allowed_space >> 4) || free_space < mss || 3414 free_space < (1 << tp->rx_opt.rcv_wscale)) 3415 return 0; 3416 } 3417 3418 if (free_space > tp->rcv_ssthresh) { 3419 free_space = tp->rcv_ssthresh; 3420 /* new window should always be an exact multiple of scaling factor 3421 * 3422 * For this case, we ALIGN "up" (increase free_space) because 3423 * we know free_space is not zero here, it has been reduced from 3424 * the memory-based limit, and rcv_ssthresh is not a hard limit 3425 * (unlike sk_rcvbuf). 3426 */ 3427 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); 3428 } 3429 3430 return free_space; 3431 } 3432 3433 void tcp_skb_collapse_tstamp(struct sk_buff *skb, 3434 const struct sk_buff *next_skb) 3435 { 3436 if (unlikely(tcp_has_tx_tstamp(next_skb))) { 3437 const struct skb_shared_info *next_shinfo = 3438 skb_shinfo(next_skb); 3439 struct skb_shared_info *shinfo = skb_shinfo(skb); 3440 3441 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 3442 shinfo->tskey = next_shinfo->tskey; 3443 TCP_SKB_CB(skb)->txstamp_ack |= 3444 TCP_SKB_CB(next_skb)->txstamp_ack; 3445 } 3446 } 3447 3448 /* Collapses two adjacent SKB's during retransmission. */ 3449 static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 3450 { 3451 struct tcp_sock *tp = tcp_sk(sk); 3452 struct sk_buff *next_skb = skb_rb_next(skb); 3453 int next_skb_size; 3454 3455 next_skb_size = next_skb->len; 3456 3457 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 3458 3459 if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size)) 3460 return false; 3461 3462 tcp_highest_sack_replace(sk, next_skb, skb); 3463 3464 /* Update sequence range on original skb. */ 3465 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 3466 3467 /* Merge over control information. This moves PSH/FIN etc. over */ 3468 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 3469 3470 /* All done, get rid of second SKB and account for it so 3471 * packet counting does not break. 3472 */ 3473 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 3474 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; 3475 3476 /* changed transmit queue under us so clear hints */ 3477 if (next_skb == tp->retransmit_skb_hint) 3478 tp->retransmit_skb_hint = skb; 3479 3480 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 3481 3482 tcp_skb_collapse_tstamp(skb, next_skb); 3483 3484 tcp_rtx_queue_unlink_and_free(next_skb, sk); 3485 return true; 3486 } 3487 3488 /* Check if coalescing SKBs is legal. */ 3489 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 3490 { 3491 if (tcp_skb_pcount(skb) > 1) 3492 return false; 3493 if (skb_cloned(skb)) 3494 return false; 3495 if (!skb_frags_readable(skb)) 3496 return false; 3497 /* Some heuristics for collapsing over SACK'd could be invented */ 3498 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 3499 return false; 3500 3501 return true; 3502 } 3503 3504 /* Collapse packets in the retransmit queue to make to create 3505 * less packets on the wire. This is only done on retransmission. 3506 */ 3507 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 3508 int space) 3509 { 3510 struct tcp_sock *tp = tcp_sk(sk); 3511 struct sk_buff *skb = to, *tmp; 3512 bool first = true; 3513 3514 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) 3515 return; 3516 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 3517 return; 3518 3519 skb_rbtree_walk_from_safe(skb, tmp) { 3520 if (!tcp_can_collapse(sk, skb)) 3521 break; 3522 3523 if (!tcp_skb_can_collapse(to, skb)) 3524 break; 3525 3526 space -= skb->len; 3527 3528 if (first) { 3529 first = false; 3530 continue; 3531 } 3532 3533 if (space < 0) 3534 break; 3535 3536 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 3537 break; 3538 3539 if (!tcp_collapse_retrans(sk, to)) 3540 break; 3541 } 3542 } 3543 3544 /* This retransmits one SKB. Policy decisions and retransmit queue 3545 * state updates are done by the caller. Returns non-zero if an 3546 * error occurred which prevented the send. 3547 */ 3548 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 3549 { 3550 struct inet_connection_sock *icsk = inet_csk(sk); 3551 struct tcp_sock *tp = tcp_sk(sk); 3552 unsigned int cur_mss; 3553 int diff, len, err; 3554 int avail_wnd; 3555 3556 /* Inconclusive MTU probe */ 3557 if (icsk->icsk_mtup.probe_size) 3558 icsk->icsk_mtup.probe_size = 0; 3559 3560 if (skb_still_in_host_queue(sk, skb)) { 3561 err = -EBUSY; 3562 goto out; 3563 } 3564 3565 start: 3566 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 3567 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 3568 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; 3569 TCP_SKB_CB(skb)->seq++; 3570 goto start; 3571 } 3572 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { 3573 WARN_ON_ONCE(1); 3574 err = -EINVAL; 3575 goto out; 3576 } 3577 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) { 3578 err = -ENOMEM; 3579 goto out; 3580 } 3581 } 3582 3583 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { 3584 err = -EHOSTUNREACH; /* Routing failure or similar. */ 3585 goto out; 3586 } 3587 3588 cur_mss = tcp_current_mss(sk); 3589 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 3590 3591 /* If receiver has shrunk his window, and skb is out of 3592 * new window, do not retransmit it. The exception is the 3593 * case, when window is shrunk to zero. In this case 3594 * our retransmit of one segment serves as a zero window probe. 3595 */ 3596 if (avail_wnd <= 0) { 3597 if (TCP_SKB_CB(skb)->seq != tp->snd_una) { 3598 err = -EAGAIN; 3599 goto out; 3600 } 3601 avail_wnd = cur_mss; 3602 } 3603 3604 len = cur_mss * segs; 3605 if (len > avail_wnd) { 3606 len = rounddown(avail_wnd, cur_mss); 3607 if (!len) 3608 len = avail_wnd; 3609 } 3610 if (skb->len > len) { 3611 if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len, 3612 cur_mss, GFP_ATOMIC)) { 3613 err = -ENOMEM; /* We'll try again later. */ 3614 goto out; 3615 } 3616 } else { 3617 if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) { 3618 err = -ENOMEM; 3619 goto out; 3620 } 3621 3622 diff = tcp_skb_pcount(skb); 3623 tcp_set_skb_tso_segs(skb, cur_mss); 3624 diff -= tcp_skb_pcount(skb); 3625 if (diff) 3626 tcp_adjust_pcount(sk, skb, diff); 3627 avail_wnd = min_t(int, avail_wnd, cur_mss); 3628 if (skb->len < avail_wnd) 3629 tcp_retrans_try_collapse(sk, skb, avail_wnd); 3630 } 3631 3632 if (!tcp_ecn_mode_pending(tp) || icsk->icsk_retransmits > 1) { 3633 /* RFC3168, section 6.1.1.1. ECN fallback 3634 * As AccECN uses the same SYN flags (+ AE), this check 3635 * covers both cases. 3636 */ 3637 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == 3638 TCPHDR_SYN_ECN) 3639 tcp_ecn_clear_syn(sk, skb); 3640 } 3641 3642 /* Update global and local TCP statistics. */ 3643 segs = tcp_skb_pcount(skb); 3644 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); 3645 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 3646 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 3647 WRITE_ONCE(tp->total_retrans, tp->total_retrans + segs); 3648 WRITE_ONCE(tp->bytes_retrans, tp->bytes_retrans + skb->len); 3649 3650 /* make sure skb->data is aligned on arches that require it 3651 * and check if ack-trimming & collapsing extended the headroom 3652 * beyond what csum_start can cover. 3653 */ 3654 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 3655 skb_headroom(skb) >= 0xFFFF)) { 3656 struct sk_buff *nskb; 3657 3658 tcp_skb_tsorted_save(skb) { 3659 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 3660 if (nskb) { 3661 nskb->dev = NULL; 3662 err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC); 3663 } else { 3664 err = -ENOBUFS; 3665 } 3666 } tcp_skb_tsorted_restore(skb); 3667 3668 if (!err) { 3669 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); 3670 tcp_rate_skb_sent(sk, skb); 3671 } 3672 } else { 3673 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3674 } 3675 3676 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) 3677 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB, 3678 TCP_SKB_CB(skb)->seq, segs, err); 3679 3680 if (unlikely(err) && err != -EBUSY) 3681 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs); 3682 3683 /* To avoid taking spuriously low RTT samples based on a timestamp 3684 * for a transmit that never happened, always mark EVER_RETRANS 3685 */ 3686 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 3687 3688 out: 3689 trace_tcp_retransmit_skb(sk, skb, err); 3690 return err; 3691 } 3692 3693 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 3694 { 3695 struct tcp_sock *tp = tcp_sk(sk); 3696 int err = __tcp_retransmit_skb(sk, skb, segs); 3697 3698 if (err == 0) { 3699 #if FASTRETRANS_DEBUG > 0 3700 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 3701 net_dbg_ratelimited("retrans_out leaked\n"); 3702 } 3703 #endif 3704 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 3705 tp->retrans_out += tcp_skb_pcount(skb); 3706 } 3707 3708 /* Save stamp of the first (attempted) retransmit. */ 3709 if (!tp->retrans_stamp) 3710 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); 3711 3712 if (tp->undo_retrans < 0) 3713 tp->undo_retrans = 0; 3714 tp->undo_retrans += tcp_skb_pcount(skb); 3715 return err; 3716 } 3717 3718 /* This gets called after a retransmit timeout, and the initially 3719 * retransmitted data is acknowledged. It tries to continue 3720 * resending the rest of the retransmit queue, until either 3721 * we've sent it all or the congestion window limit is reached. 3722 */ 3723 void tcp_xmit_retransmit_queue(struct sock *sk) 3724 { 3725 const struct inet_connection_sock *icsk = inet_csk(sk); 3726 struct sk_buff *skb, *rtx_head, *hole = NULL; 3727 struct tcp_sock *tp = tcp_sk(sk); 3728 bool rearm_timer = false; 3729 u32 max_segs; 3730 int mib_idx; 3731 3732 if (!tp->packets_out) 3733 return; 3734 3735 rtx_head = tcp_rtx_queue_head(sk); 3736 skb = tp->retransmit_skb_hint ?: rtx_head; 3737 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk)); 3738 skb_rbtree_walk_from(skb) { 3739 __u8 sacked; 3740 int segs; 3741 3742 if (tcp_pacing_check(sk)) 3743 break; 3744 3745 /* we could do better than to assign each time */ 3746 if (!hole) 3747 tp->retransmit_skb_hint = skb; 3748 3749 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); 3750 if (segs <= 0) 3751 break; 3752 sacked = TCP_SKB_CB(skb)->sacked; 3753 /* In case tcp_shift_skb_data() have aggregated large skbs, 3754 * we need to make sure not sending too bigs TSO packets 3755 */ 3756 segs = min_t(int, segs, max_segs); 3757 3758 if (tp->retrans_out >= tp->lost_out) { 3759 break; 3760 } else if (!(sacked & TCPCB_LOST)) { 3761 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 3762 hole = skb; 3763 continue; 3764 3765 } else { 3766 if (icsk->icsk_ca_state != TCP_CA_Loss) 3767 mib_idx = LINUX_MIB_TCPFASTRETRANS; 3768 else 3769 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 3770 } 3771 3772 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 3773 continue; 3774 3775 if (tcp_small_queue_check(sk, skb, 1)) 3776 break; 3777 3778 if (tcp_retransmit_skb(sk, skb, segs)) 3779 break; 3780 3781 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb)); 3782 3783 if (tcp_in_cwnd_reduction(sk)) 3784 tp->prr_out += tcp_skb_pcount(skb); 3785 3786 if (skb == rtx_head && 3787 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) 3788 rearm_timer = true; 3789 3790 } 3791 if (rearm_timer) 3792 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 3793 inet_csk(sk)->icsk_rto, true); 3794 } 3795 3796 /* Send a FIN. The caller locks the socket for us. 3797 * We should try to send a FIN packet really hard, but eventually give up. 3798 */ 3799 void tcp_send_fin(struct sock *sk) 3800 { 3801 struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk); 3802 struct tcp_sock *tp = tcp_sk(sk); 3803 3804 /* Optimization, tack on the FIN if we have one skb in write queue and 3805 * this skb was not yet sent, or we are under memory pressure. 3806 * Note: in the latter case, FIN packet will be sent after a timeout, 3807 * as TCP stack thinks it has already been transmitted. 3808 */ 3809 tskb = tail; 3810 if (!tskb && tcp_under_memory_pressure(sk)) 3811 tskb = skb_rb_last(&sk->tcp_rtx_queue); 3812 3813 if (tskb) { 3814 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; 3815 TCP_SKB_CB(tskb)->end_seq++; 3816 tp->write_seq++; 3817 if (!tail) { 3818 /* This means tskb was already sent. 3819 * Pretend we included the FIN on previous transmit. 3820 * We need to set tp->snd_nxt to the value it would have 3821 * if FIN had been sent. This is because retransmit path 3822 * does not change tp->snd_nxt. 3823 */ 3824 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); 3825 return; 3826 } 3827 } else { 3828 skb = alloc_skb_fclone(MAX_TCP_HEADER, 3829 sk_gfp_mask(sk, GFP_ATOMIC | 3830 __GFP_NOWARN)); 3831 if (unlikely(!skb)) 3832 return; 3833 3834 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 3835 skb_reserve(skb, MAX_TCP_HEADER); 3836 sk_forced_mem_schedule(sk, skb->truesize); 3837 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 3838 tcp_init_nondata_skb(skb, sk, tp->write_seq, 3839 TCPHDR_ACK | TCPHDR_FIN); 3840 tcp_queue_skb(sk, skb); 3841 } 3842 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); 3843 } 3844 3845 /* We get here when a process closes a file descriptor (either due to 3846 * an explicit close() or as a byproduct of exit()'ing) and there 3847 * was unread data in the receive queue. This behavior is recommended 3848 * by RFC 2525, section 2.17. -DaveM 3849 */ 3850 void tcp_send_active_reset(struct sock *sk, gfp_t priority, 3851 enum sk_rst_reason reason) 3852 { 3853 struct sk_buff *skb; 3854 3855 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 3856 3857 /* NOTE: No TCP options attached and we never retransmit this. */ 3858 skb = alloc_skb(MAX_TCP_HEADER, priority); 3859 if (!skb) { 3860 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3861 return; 3862 } 3863 3864 /* Reserve space for headers and prepare control bits. */ 3865 skb_reserve(skb, MAX_TCP_HEADER); 3866 tcp_init_nondata_skb(skb, sk, tcp_acceptable_seq(sk), 3867 TCPHDR_ACK | TCPHDR_RST); 3868 tcp_mstamp_refresh(tcp_sk(sk)); 3869 /* Send it off. */ 3870 if (tcp_transmit_skb(sk, skb, 0, priority)) 3871 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 3872 3873 /* skb of trace_tcp_send_reset() keeps the skb that caused RST, 3874 * skb here is different to the troublesome skb, so use NULL 3875 */ 3876 trace_tcp_send_reset(sk, NULL, reason); 3877 } 3878 3879 /* Send a crossed SYN-ACK during socket establishment. 3880 * WARNING: This routine must only be called when we have already sent 3881 * a SYN packet that crossed the incoming SYN that caused this routine 3882 * to get called. If this assumption fails then the initial rcv_wnd 3883 * and rcv_wscale values will not be correct. 3884 */ 3885 int tcp_send_synack(struct sock *sk) 3886 { 3887 struct sk_buff *skb; 3888 3889 skb = tcp_rtx_queue_head(sk); 3890 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 3891 pr_err("%s: wrong queue state\n", __func__); 3892 return -EFAULT; 3893 } 3894 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 3895 if (skb_cloned(skb)) { 3896 struct sk_buff *nskb; 3897 3898 tcp_skb_tsorted_save(skb) { 3899 nskb = skb_copy(skb, GFP_ATOMIC); 3900 } tcp_skb_tsorted_restore(skb); 3901 if (!nskb) 3902 return -ENOMEM; 3903 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); 3904 tcp_highest_sack_replace(sk, skb, nskb); 3905 tcp_rtx_queue_unlink_and_free(skb, sk); 3906 __skb_header_release(nskb); 3907 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); 3908 sk_wmem_queued_add(sk, nskb->truesize); 3909 sk_mem_charge(sk, nskb->truesize); 3910 skb = nskb; 3911 } 3912 3913 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 3914 tcp_ecn_send_synack(sk, skb); 3915 } 3916 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3917 } 3918 3919 /** 3920 * tcp_make_synack - Allocate one skb and build a SYNACK packet. 3921 * @sk: listener socket 3922 * @dst: dst entry attached to the SYNACK. It is consumed and caller 3923 * should not use it again. 3924 * @req: request_sock pointer 3925 * @foc: cookie for tcp fast open 3926 * @synack_type: Type of synack to prepare 3927 * @syn_skb: SYN packet just received. It could be NULL for rtx case. 3928 */ 3929 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 3930 struct request_sock *req, 3931 struct tcp_fastopen_cookie *foc, 3932 enum tcp_synack_type synack_type, 3933 struct sk_buff *syn_skb) 3934 { 3935 struct inet_request_sock *ireq = inet_rsk(req); 3936 const struct tcp_sock *tp = tcp_sk(sk); 3937 struct tcp_out_options opts; 3938 struct tcp_key key = {}; 3939 struct sk_buff *skb; 3940 int tcp_header_size; 3941 struct tcphdr *th; 3942 int mss; 3943 u64 now; 3944 3945 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 3946 if (unlikely(!skb)) { 3947 dst_release(dst); 3948 return NULL; 3949 } 3950 /* Reserve space for headers. */ 3951 skb_reserve(skb, MAX_TCP_HEADER); 3952 3953 switch (synack_type) { 3954 case TCP_SYNACK_NORMAL: 3955 case TCP_SYNACK_RETRANS: 3956 skb_set_owner_edemux(skb, req_to_sk(req)); 3957 break; 3958 case TCP_SYNACK_COOKIE: 3959 /* Under synflood, we do not attach skb to a socket, 3960 * to avoid false sharing. 3961 */ 3962 break; 3963 case TCP_SYNACK_FASTOPEN: 3964 /* sk is a const pointer, because we want to express multiple 3965 * cpu might call us concurrently. 3966 * sk->sk_wmem_alloc in an atomic, we can promote to rw. 3967 */ 3968 skb_set_owner_w(skb, (struct sock *)sk); 3969 break; 3970 } 3971 skb_dst_set(skb, dst); 3972 3973 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); 3974 3975 memset(&opts, 0, sizeof(opts)); 3976 now = tcp_clock_ns(); 3977 #ifdef CONFIG_SYN_COOKIES 3978 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) 3979 skb_set_delivery_time(skb, cookie_init_timestamp(req, now), 3980 SKB_CLOCK_MONOTONIC); 3981 else 3982 #endif 3983 { 3984 skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); 3985 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ 3986 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); 3987 } 3988 3989 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 3990 rcu_read_lock(); 3991 #endif 3992 if (tcp_rsk_used_ao(req)) { 3993 #ifdef CONFIG_TCP_AO 3994 struct tcp_ao_key *ao_key = NULL; 3995 u8 keyid = tcp_rsk(req)->ao_keyid; 3996 u8 rnext = tcp_rsk(req)->ao_rcv_next; 3997 3998 ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), 3999 keyid, -1); 4000 /* If there is no matching key - avoid sending anything, 4001 * especially usigned segments. It could try harder and lookup 4002 * for another peer-matching key, but the peer has requested 4003 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here. 4004 */ 4005 if (unlikely(!ao_key)) { 4006 trace_tcp_ao_synack_no_key(sk, keyid, rnext); 4007 rcu_read_unlock(); 4008 kfree_skb(skb); 4009 net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", 4010 keyid); 4011 return NULL; 4012 } 4013 key.ao_key = ao_key; 4014 key.type = TCP_KEY_AO; 4015 #endif 4016 } else { 4017 #ifdef CONFIG_TCP_MD5SIG 4018 key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk, 4019 req_to_sk(req)); 4020 if (key.md5_key) 4021 key.type = TCP_KEY_MD5; 4022 #endif 4023 } 4024 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); 4025 /* bpf program will be interested in the tcp_flags */ 4026 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; 4027 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, 4028 &key, foc, synack_type, syn_skb) 4029 + sizeof(*th); 4030 4031 skb_push(skb, tcp_header_size); 4032 skb_reset_transport_header(skb); 4033 4034 th = (struct tcphdr *)skb->data; 4035 memset(th, 0, sizeof(struct tcphdr)); 4036 th->syn = 1; 4037 th->ack = 1; 4038 tcp_ecn_make_synack(req, th, synack_type); 4039 th->source = htons(ireq->ir_num); 4040 th->dest = ireq->ir_rmt_port; 4041 skb->mark = ireq->ir_mark; 4042 skb->ip_summed = CHECKSUM_PARTIAL; 4043 th->seq = htonl(tcp_rsk(req)->snt_isn); 4044 /* XXX data is queued and acked as is. No buffer/window check */ 4045 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 4046 4047 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 4048 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); 4049 tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key); 4050 th->doff = (tcp_header_size >> 2); 4051 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 4052 4053 /* Okay, we have all we need - do the md5 hash if needed */ 4054 if (tcp_key_is_md5(&key)) { 4055 #ifdef CONFIG_TCP_MD5SIG 4056 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 4057 key.md5_key, req_to_sk(req), skb); 4058 #endif 4059 } else if (tcp_key_is_ao(&key)) { 4060 #ifdef CONFIG_TCP_AO 4061 tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location, 4062 key.ao_key, req, skb, 4063 opts.hash_location - (u8 *)th, 0); 4064 #endif 4065 } 4066 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 4067 rcu_read_unlock(); 4068 #endif 4069 4070 bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, 4071 synack_type, &opts); 4072 4073 skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); 4074 tcp_add_tx_delay(skb, tp); 4075 4076 return skb; 4077 } 4078 4079 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) 4080 { 4081 struct inet_connection_sock *icsk = inet_csk(sk); 4082 const struct tcp_congestion_ops *ca; 4083 u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); 4084 4085 if (ca_key == TCP_CA_UNSPEC) 4086 return; 4087 4088 rcu_read_lock(); 4089 ca = tcp_ca_find_key(ca_key); 4090 if (likely(ca && bpf_try_module_get(ca, ca->owner))) { 4091 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); 4092 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); 4093 icsk->icsk_ca_ops = ca; 4094 } 4095 rcu_read_unlock(); 4096 } 4097 4098 /* Do all connect socket setups that can be done AF independent. */ 4099 static void tcp_connect_init(struct sock *sk) 4100 { 4101 const struct dst_entry *dst = __sk_dst_get(sk); 4102 struct tcp_sock *tp = tcp_sk(sk); 4103 __u8 rcv_wscale; 4104 u16 user_mss; 4105 u32 rcv_wnd; 4106 4107 /* We'll fix this up when we get a response from the other end. 4108 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 4109 */ 4110 tp->tcp_header_len = sizeof(struct tcphdr); 4111 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) 4112 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; 4113 4114 tcp_ao_connect_init(sk); 4115 4116 /* If user gave his TCP_MAXSEG, record it to clamp */ 4117 user_mss = READ_ONCE(tp->rx_opt.user_mss); 4118 if (user_mss) 4119 tp->rx_opt.mss_clamp = user_mss; 4120 tp->max_window = 0; 4121 tcp_mtup_init(sk); 4122 tcp_sync_mss(sk, dst_mtu(dst)); 4123 4124 tcp_ca_dst_init(sk, dst); 4125 4126 if (!tp->window_clamp) 4127 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); 4128 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); 4129 4130 tcp_initialize_rcv_mss(sk); 4131 4132 /* limit the window selection if the user enforce a smaller rx buffer */ 4133 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 4134 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 4135 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); 4136 4137 rcv_wnd = tcp_rwnd_init_bpf(sk); 4138 if (rcv_wnd == 0) 4139 rcv_wnd = dst_metric(dst, RTAX_INITRWND); 4140 4141 tcp_select_initial_window(sk, tcp_full_space(sk), 4142 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 4143 &tp->rcv_wnd, 4144 &tp->window_clamp, 4145 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), 4146 &rcv_wscale, 4147 rcv_wnd); 4148 4149 tp->rx_opt.rcv_wscale = rcv_wscale; 4150 tp->rcv_ssthresh = tp->rcv_wnd; 4151 4152 WRITE_ONCE(sk->sk_err, 0); 4153 sock_reset_flag(sk, SOCK_DONE); 4154 tp->snd_wnd = 0; 4155 tcp_init_wl(tp, 0); 4156 tcp_write_queue_purge(sk); 4157 WRITE_ONCE(tp->snd_una, tp->write_seq); 4158 tp->snd_sml = tp->write_seq; 4159 tp->snd_up = tp->write_seq; 4160 WRITE_ONCE(tp->snd_nxt, tp->write_seq); 4161 4162 if (likely(!tp->repair)) 4163 tp->rcv_nxt = 0; 4164 else 4165 tp->rcv_tstamp = tcp_jiffies32; 4166 tp->rcv_wup = tp->rcv_nxt; 4167 tp->rcv_mwnd_seq = tp->rcv_nxt + tp->rcv_wnd; 4168 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 4169 4170 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); 4171 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0); 4172 tcp_clear_retrans(tp); 4173 } 4174 4175 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 4176 { 4177 struct tcp_sock *tp = tcp_sk(sk); 4178 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 4179 4180 tcb->end_seq += skb->len; 4181 __skb_header_release(skb); 4182 sk_wmem_queued_add(sk, skb->truesize); 4183 sk_mem_charge(sk, skb->truesize); 4184 WRITE_ONCE(tp->write_seq, tcb->end_seq); 4185 tp->packets_out += tcp_skb_pcount(skb); 4186 } 4187 4188 /* Build and send a SYN with data and (cached) Fast Open cookie. However, 4189 * queue a data-only packet after the regular SYN, such that regular SYNs 4190 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges 4191 * only the SYN sequence, the data are retransmitted in the first ACK. 4192 * If cookie is not cached or other error occurs, falls back to send a 4193 * regular SYN with Fast Open cookie request option. 4194 */ 4195 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 4196 { 4197 struct inet_connection_sock *icsk = inet_csk(sk); 4198 struct tcp_sock *tp = tcp_sk(sk); 4199 struct tcp_fastopen_request *fo = tp->fastopen_req; 4200 struct page_frag *pfrag = sk_page_frag(sk); 4201 struct sk_buff *syn_data; 4202 int space, err = 0; 4203 4204 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 4205 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) 4206 goto fallback; 4207 4208 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 4209 * user-MSS. Reserve maximum option space for middleboxes that add 4210 * private TCP options. The cost is reduced data space in SYN :( 4211 */ 4212 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); 4213 /* Sync mss_cache after updating the mss_clamp */ 4214 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 4215 4216 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - 4217 MAX_TCP_OPTION_SPACE; 4218 4219 space = min_t(size_t, space, fo->size); 4220 4221 if (space && 4222 !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE), 4223 pfrag, sk->sk_allocation)) 4224 goto fallback; 4225 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); 4226 if (!syn_data) 4227 goto fallback; 4228 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); 4229 if (space) { 4230 space = min_t(size_t, space, pfrag->size - pfrag->offset); 4231 space = tcp_wmem_schedule(sk, space); 4232 } 4233 if (space) { 4234 space = copy_page_from_iter(pfrag->page, pfrag->offset, 4235 space, &fo->data->msg_iter); 4236 if (unlikely(!space)) { 4237 tcp_skb_tsorted_anchor_cleanup(syn_data); 4238 kfree_skb(syn_data); 4239 goto fallback; 4240 } 4241 skb_fill_page_desc(syn_data, 0, pfrag->page, 4242 pfrag->offset, space); 4243 page_ref_inc(pfrag->page); 4244 pfrag->offset += space; 4245 skb_len_add(syn_data, space); 4246 skb_zcopy_set(syn_data, fo->uarg, NULL); 4247 } 4248 /* No more data pending in inet_wait_for_connect() */ 4249 if (space == fo->size) 4250 fo->data = NULL; 4251 fo->copied = space; 4252 4253 tcp_connect_queue_skb(sk, syn_data); 4254 if (syn_data->len) 4255 tcp_chrono_start(sk, TCP_CHRONO_BUSY); 4256 4257 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); 4258 4259 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC); 4260 4261 /* Now full SYN+DATA was cloned and sent (or not), 4262 * remove the SYN from the original skb (syn_data) 4263 * we keep in write queue in case of a retransmit, as we 4264 * also have the SYN packet (with no data) in the same queue. 4265 */ 4266 TCP_SKB_CB(syn_data)->seq++; 4267 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; 4268 if (!err) { 4269 tp->syn_data = (fo->copied > 0); 4270 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); 4271 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); 4272 goto done; 4273 } 4274 4275 /* data was not sent, put it in write_queue */ 4276 __skb_queue_tail(&sk->sk_write_queue, syn_data); 4277 tp->packets_out -= tcp_skb_pcount(syn_data); 4278 4279 fallback: 4280 /* Send a regular SYN with Fast Open cookie request option */ 4281 if (fo->cookie.len > 0) 4282 fo->cookie.len = 0; 4283 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 4284 if (err) 4285 tp->syn_fastopen = 0; 4286 done: 4287 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 4288 return err; 4289 } 4290 4291 /* Build a SYN and send it off. */ 4292 int tcp_connect(struct sock *sk) 4293 { 4294 struct tcp_sock *tp = tcp_sk(sk); 4295 struct sk_buff *buff; 4296 int err; 4297 4298 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL); 4299 4300 #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO) 4301 /* Has to be checked late, after setting daddr/saddr/ops. 4302 * Return error if the peer has both a md5 and a tcp-ao key 4303 * configured as this is ambiguous. 4304 */ 4305 if (unlikely(rcu_dereference_protected(tp->md5sig_info, 4306 lockdep_sock_is_held(sk)))) { 4307 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); 4308 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); 4309 struct tcp_ao_info *ao_info; 4310 4311 ao_info = rcu_dereference_check(tp->ao_info, 4312 lockdep_sock_is_held(sk)); 4313 if (ao_info) { 4314 /* This is an extra check: tcp_ao_required() in 4315 * tcp_v{4,6}_parse_md5_keys() should prevent adding 4316 * md5 keys on ao_required socket. 4317 */ 4318 needs_ao |= ao_info->ao_required; 4319 WARN_ON_ONCE(ao_info->ao_required && needs_md5); 4320 } 4321 if (needs_md5 && needs_ao) 4322 return -EKEYREJECTED; 4323 4324 /* If we have a matching md5 key and no matching tcp-ao key 4325 * then free up ao_info if allocated. 4326 */ 4327 if (needs_md5) { 4328 tcp_ao_destroy_sock(sk, false); 4329 } else if (needs_ao) { 4330 tcp_clear_md5_list(sk); 4331 kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 4332 lockdep_sock_is_held(sk))); 4333 } 4334 } 4335 #endif 4336 #ifdef CONFIG_TCP_AO 4337 if (unlikely(rcu_dereference_protected(tp->ao_info, 4338 lockdep_sock_is_held(sk)))) { 4339 /* Don't allow connecting if ao is configured but no 4340 * matching key is found. 4341 */ 4342 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) 4343 return -EKEYREJECTED; 4344 } 4345 #endif 4346 4347 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 4348 return -EHOSTUNREACH; /* Routing failure or similar. */ 4349 4350 tcp_connect_init(sk); 4351 4352 if (unlikely(tp->repair)) { 4353 tcp_finish_connect(sk, NULL); 4354 return 0; 4355 } 4356 4357 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); 4358 if (unlikely(!buff)) 4359 return -ENOBUFS; 4360 4361 /* SYN eats a sequence byte, write_seq updated by 4362 * tcp_connect_queue_skb(). 4363 */ 4364 tcp_init_nondata_skb(buff, sk, tp->write_seq, TCPHDR_SYN); 4365 tcp_mstamp_refresh(tp); 4366 tp->retrans_stamp = tcp_time_stamp_ts(tp); 4367 tcp_connect_queue_skb(sk, buff); 4368 tcp_ecn_send_syn(sk, buff); 4369 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); 4370 4371 /* Send off SYN; include data in Fast Open. */ 4372 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 4373 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 4374 if (err == -ECONNREFUSED) 4375 return err; 4376 4377 /* We change tp->snd_nxt after the tcp_transmit_skb() call 4378 * in order to make this packet get counted in tcpOutSegs. 4379 */ 4380 WRITE_ONCE(tp->snd_nxt, tp->write_seq); 4381 tp->pushed_seq = tp->write_seq; 4382 buff = tcp_send_head(sk); 4383 if (unlikely(buff)) { 4384 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); 4385 tp->pushed_seq = TCP_SKB_CB(buff)->seq; 4386 } 4387 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 4388 4389 /* Timer for repeating the SYN until an answer. */ 4390 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 4391 inet_csk(sk)->icsk_rto, false); 4392 return 0; 4393 } 4394 EXPORT_SYMBOL(tcp_connect); 4395 4396 u32 tcp_delack_max(const struct sock *sk) 4397 { 4398 u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; 4399 4400 return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min); 4401 } 4402 4403 /* Send out a delayed ack, the caller does the policy checking 4404 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 4405 * for details. 4406 */ 4407 void tcp_send_delayed_ack(struct sock *sk) 4408 { 4409 struct inet_connection_sock *icsk = inet_csk(sk); 4410 int ato = icsk->icsk_ack.ato; 4411 unsigned long timeout; 4412 4413 if (ato > TCP_DELACK_MIN) { 4414 const struct tcp_sock *tp = tcp_sk(sk); 4415 int max_ato = HZ / 2; 4416 4417 if (inet_csk_in_pingpong_mode(sk) || 4418 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 4419 max_ato = TCP_DELACK_MAX; 4420 4421 /* Slow path, intersegment interval is "high". */ 4422 4423 /* If some rtt estimate is known, use it to bound delayed ack. 4424 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 4425 * directly. 4426 */ 4427 if (tp->srtt_us) { 4428 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), 4429 TCP_DELACK_MIN); 4430 4431 if (rtt < max_ato) 4432 max_ato = rtt; 4433 } 4434 4435 ato = min(ato, max_ato); 4436 } 4437 4438 ato = min_t(u32, ato, tcp_delack_max(sk)); 4439 4440 /* Stay within the limit we were given */ 4441 timeout = jiffies + ato; 4442 4443 /* Use new timeout only if there wasn't a older one earlier. */ 4444 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 4445 /* If delack timer is about to expire, send ACK now. */ 4446 if (time_before_eq(icsk_delack_timeout(icsk), jiffies + (ato >> 2))) { 4447 tcp_send_ack(sk); 4448 return; 4449 } 4450 4451 if (!time_before(timeout, icsk_delack_timeout(icsk))) 4452 timeout = icsk_delack_timeout(icsk); 4453 } 4454 smp_store_release(&icsk->icsk_ack.pending, 4455 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); 4456 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 4457 } 4458 4459 /* This routine sends an ack and also updates the window. */ 4460 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags) 4461 { 4462 struct sk_buff *buff; 4463 4464 /* If we have been reset, we may not send again. */ 4465 if (sk->sk_state == TCP_CLOSE) 4466 return; 4467 4468 /* We are not putting this on the write queue, so 4469 * tcp_transmit_skb() will set the ownership to this 4470 * sock. 4471 */ 4472 buff = alloc_skb(MAX_TCP_HEADER, 4473 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 4474 if (unlikely(!buff)) { 4475 struct inet_connection_sock *icsk = inet_csk(sk); 4476 unsigned long delay; 4477 4478 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; 4479 if (delay < tcp_rto_max(sk)) 4480 icsk->icsk_ack.retry++; 4481 inet_csk_schedule_ack(sk); 4482 icsk->icsk_ack.ato = TCP_ATO_MIN; 4483 tcp_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, false); 4484 return; 4485 } 4486 4487 /* Reserve space for headers and prepare control bits. */ 4488 skb_reserve(buff, MAX_TCP_HEADER); 4489 tcp_init_nondata_skb(buff, sk, 4490 tcp_acceptable_seq(sk), TCPHDR_ACK | flags); 4491 4492 /* We do not want pure acks influencing TCP Small Queues or fq/pacing 4493 * too much. 4494 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 4495 */ 4496 skb_set_tcp_pure_ack(buff); 4497 4498 /* Send it off, this clears delayed acks for us. */ 4499 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); 4500 } 4501 EXPORT_SYMBOL_GPL(__tcp_send_ack); 4502 4503 void tcp_send_ack(struct sock *sk) 4504 { 4505 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt, 0); 4506 } 4507 4508 /* This routine sends a packet with an out of date sequence 4509 * number. It assumes the other end will try to ack it. 4510 * 4511 * Question: what should we make while urgent mode? 4512 * 4.4BSD forces sending single byte of data. We cannot send 4513 * out of window data, because we have SND.NXT==SND.MAX... 4514 * 4515 * Current solution: to send TWO zero-length segments in urgent mode: 4516 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 4517 * out-of-date with SND.UNA-1 to probe window. 4518 */ 4519 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) 4520 { 4521 struct tcp_sock *tp = tcp_sk(sk); 4522 struct sk_buff *skb; 4523 4524 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 4525 skb = alloc_skb(MAX_TCP_HEADER, 4526 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 4527 if (!skb) 4528 return -1; 4529 4530 /* Reserve space for headers and set control bits. */ 4531 skb_reserve(skb, MAX_TCP_HEADER); 4532 /* Use a previous sequence. This should cause the other 4533 * end to send an ack. Don't queue or clone SKB, just 4534 * send it. 4535 */ 4536 tcp_init_nondata_skb(skb, sk, tp->snd_una - !urgent, TCPHDR_ACK); 4537 NET_INC_STATS(sock_net(sk), mib); 4538 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); 4539 } 4540 4541 /* Called from setsockopt( ... TCP_REPAIR ) */ 4542 void tcp_send_window_probe(struct sock *sk) 4543 { 4544 if (sk->sk_state == TCP_ESTABLISHED) { 4545 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 4546 tcp_mstamp_refresh(tcp_sk(sk)); 4547 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); 4548 } 4549 } 4550 4551 /* Initiate keepalive or window probe from timer. */ 4552 int tcp_write_wakeup(struct sock *sk, int mib) 4553 { 4554 struct tcp_sock *tp = tcp_sk(sk); 4555 struct sk_buff *skb; 4556 4557 if (sk->sk_state == TCP_CLOSE) 4558 return -1; 4559 4560 skb = tcp_send_head(sk); 4561 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 4562 int err; 4563 unsigned int mss = tcp_current_mss(sk); 4564 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 4565 4566 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 4567 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 4568 4569 /* We are probing the opening of a window 4570 * but the window size is != 0 4571 * must have been a result SWS avoidance ( sender ) 4572 */ 4573 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 4574 skb->len > mss) { 4575 seg_size = min(seg_size, mss); 4576 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 4577 if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, 4578 skb, seg_size, mss, GFP_ATOMIC)) 4579 return -1; 4580 } else if (!tcp_skb_pcount(skb)) 4581 tcp_set_skb_tso_segs(skb, mss); 4582 4583 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 4584 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 4585 if (!err) 4586 tcp_event_new_data_sent(sk, skb); 4587 return err; 4588 } else { 4589 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 4590 tcp_xmit_probe_skb(sk, 1, mib); 4591 return tcp_xmit_probe_skb(sk, 0, mib); 4592 } 4593 } 4594 4595 /* A window probe timeout has occurred. If window is not closed send 4596 * a partial packet else a zero probe. 4597 */ 4598 void tcp_send_probe0(struct sock *sk) 4599 { 4600 struct inet_connection_sock *icsk = inet_csk(sk); 4601 struct tcp_sock *tp = tcp_sk(sk); 4602 struct net *net = sock_net(sk); 4603 unsigned long timeout; 4604 int err; 4605 4606 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); 4607 4608 if (tp->packets_out || tcp_write_queue_empty(sk)) { 4609 /* Cancel probe timer, if it is not required. */ 4610 WRITE_ONCE(icsk->icsk_probes_out, 0); 4611 icsk->icsk_backoff = 0; 4612 icsk->icsk_probes_tstamp = 0; 4613 return; 4614 } 4615 4616 WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1); 4617 if (err <= 0) { 4618 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) 4619 icsk->icsk_backoff++; 4620 timeout = tcp_probe0_when(sk, tcp_rto_max(sk)); 4621 } else { 4622 /* If packet was not sent due to local congestion, 4623 * Let senders fight for local resources conservatively. 4624 */ 4625 timeout = TCP_RESOURCE_PROBE_INTERVAL; 4626 } 4627 4628 timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); 4629 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, true); 4630 } 4631 4632 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) 4633 { 4634 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; 4635 struct flowi fl; 4636 int res; 4637 4638 /* Paired with WRITE_ONCE() in sock_setsockopt() */ 4639 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) 4640 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); 4641 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_RETRANS, 4642 NULL); 4643 if (!res) { 4644 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 4645 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 4646 if (unlikely(tcp_passive_fastopen(sk))) { 4647 /* sk has const attribute because listeners are lockless. 4648 * However in this case, we are dealing with a passive fastopen 4649 * socket thus we can change total_retrans value. 4650 */ 4651 WRITE_ONCE(tcp_sk_rw(sk)->total_retrans, 4652 tcp_sk_rw(sk)->total_retrans + 1); 4653 } 4654 trace_tcp_retransmit_synack(sk, req); 4655 WRITE_ONCE(req->num_retrans, req->num_retrans + 1); 4656 } 4657 return res; 4658 } 4659