
1 // SPDX-License-Identifier: GPL-2.0-only
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
33 * Cacophonix Gaul : draft-minshall-nagle-01
59 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
60 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
71 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
73 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
75 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
76 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
78 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
79 tp->highest_sack = skb; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
82 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
93 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
101 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
102 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
103 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
104 return tp->snd_nxt; in tcp_acceptable_seq()
110 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
112 * 1. It is independent of path mtu.
113 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
127 int mss = tp->advmss; in tcp_advertise_mss()
134 tp->advmss = mss; in tcp_advertise_mss()
152 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
155 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
156 cwnd >>= 1; in tcp_cwnd_restart()
158 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
159 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
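
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The loop above
 * halves cwnd once per RTO that elapsed while the connection sat idle,
 * never dropping below the restart window. A plain-C rendering with
 * simplified, hypothetical names, assuming jiffies-based inputs:
 */
static unsigned int sketch_restart_cwnd(unsigned int cwnd,
					unsigned int restart_cwnd,
					unsigned int idle, unsigned int rto)
{
	while (idle >= rto && cwnd > restart_cwnd) {
		idle -= rto;
		cwnd >>= 1;	/* one halving per full RTO of idleness */
	}
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}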
172 tp->lsndtime = now; in tcp_event_data_sent()
177 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
186 if (unlikely(tp->compressed_ack)) { in tcp_event_ack_sent()
188 tp->compressed_ack); in tcp_event_ack_sent()
189 tp->compressed_ack = 0; in tcp_event_ack_sent()
190 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
194 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
204 * be a multiple of mss if possible. We assume here that mss >= 1.
227 * we will truncate our initial window offering to 32K-1 in tcp_select_initial_window()
232 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_initial_window()
243 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_select_initial_window()
246 *rcv_wscale = clamp_t(int, ilog2(space) - 15, in tcp_select_initial_window()
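
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The clamp above
 * picks the smallest window-scale shift that lets the 16-bit window field
 * cover "space" bytes: ilog2(space) - 15, clamped to [0, 14]. A standalone
 * stand-in using only plain C (the helper name is hypothetical):
 */
static int sketch_rcv_wscale(unsigned int space)
{
	int bits = 0, scale;

	while (bits < 31 && (1U << (bits + 1)) <= space)
		bits++;			/* bits = floor(log2(space)) */
	scale = bits - 15;		/* 16-bit field covers 65535 bytes */
	if (scale < 0)
		scale = 0;
	if (scale > 14)
		scale = 14;		/* TCP_MAX_WSCALE per RFC 7323 */
	return scale;
}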
257 * value can be stuffed directly into th->window for an outgoing
264 u32 old_win = tp->rcv_wnd; in tcp_select_window()
270 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) { in tcp_select_window()
271 tp->pred_flags = 0; in tcp_select_window()
272 tp->rcv_wnd = 0; in tcp_select_window()
273 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
283 * window in time. --DaveM in tcp_select_window()
287 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { in tcp_select_window()
291 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
295 tp->rcv_wnd = new_win; in tcp_select_window()
296 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
301 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
302 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_window()
305 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
308 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
312 tp->pred_flags = 0; in tcp_select_window()
322 /* Packet ECN state for a SYN-ACK */
327 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
328 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
329 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
340 bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 || in tcp_ecn_send_syn()
350 tp->ecn_flags = 0; in tcp_ecn_send_syn()
353 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
354 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
362 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)) in tcp_ecn_clear_syn()
363 /* tp->ecn_flags are cleared at a later point in time when in tcp_ecn_clear_syn()
366 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); in tcp_ecn_clear_syn()
372 if (inet_rsk(req)->ecn_ok) in tcp_ecn_make_synack()
373 th->ece = 1; in tcp_ecn_make_synack()
384 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
385 /* Not-retransmitted data segment: set ECT and inject CWR. */ in tcp_ecn_send()
386 if (skb->len != tcp_header_len && in tcp_ecn_send()
387 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
389 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
390 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
391 th->cwr = 1; in tcp_ecn_send()
392 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
398 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
399 th->ece = 1; in tcp_ecn_send()
403 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
408 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
410 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
412 tcp_skb_pcount_set(skb, 1); in tcp_init_nondata_skb()
414 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
417 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
422 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
426 #define OPTION_TS BIT(1)
467 if (unlikely(OPTION_MPTCP & opts->options)) in mptcp_options_write()
468 mptcp_write_options(th, ptr, tp, &opts->mptcp); in mptcp_options_write()
514 * Thus, "req" is passed here and the cgroup-bpf-progs in bpf_skops_hdr_opt_len()
519 * consistent between fastopen and non-fastopen on in bpf_skops_hdr_opt_len()
527 sock_ops.is_fullsock = 1; in bpf_skops_hdr_opt_len()
542 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; in bpf_skops_hdr_opt_len()
544 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; in bpf_skops_hdr_opt_len()
546 *remaining -= opts->bpf_opt_len; in bpf_skops_hdr_opt_len()
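
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The
 * "(len + 3) & ~3" above is the usual round-up-to-multiple-of-4 idiom:
 * TCP option space is counted in 32-bit words, and any slack after the
 * BPF-written options is padded with TCPOPT_NOP (see the memset further
 * down in bpf_skops_write_hdr_opt()).
 */
static unsigned int sketch_align4(unsigned int len)
{
	return (len + 3) & ~3U;	/* 5 -> 8, 8 -> 8, 9 -> 12 */
}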
555 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; in bpf_skops_write_hdr_opt()
572 sock_ops.is_fullsock = 1; in bpf_skops_write_hdr_opt()
578 first_opt_off = tcp_hdrlen(skb) - max_opt_len; in bpf_skops_write_hdr_opt()
586 nr_written = max_opt_len - sock_ops.remaining_opt_len; in bpf_skops_write_hdr_opt()
589 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, in bpf_skops_write_hdr_opt()
590 max_opt_len - nr_written); in bpf_skops_write_hdr_opt()
617 u8 maclen = tcp_ao_maclen(key->ao_key); in process_tcp_ao_options()
623 (tcprsk->ao_keyid << 8) | in process_tcp_ao_options()
624 (tcprsk->ao_rcv_next)); in process_tcp_ao_options()
629 ao_info = rcu_dereference_check(tp->ao_info, in process_tcp_ao_options()
630 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); in process_tcp_ao_options()
631 rnext_key = READ_ONCE(ao_info->rnext_key); in process_tcp_ao_options()
635 (tcp_ao_len(key->ao_key) << 16) | in process_tcp_ao_options()
636 (key->ao_key->sndid << 8) | in process_tcp_ao_options()
637 (rnext_key->rcvid)); in process_tcp_ao_options()
639 opts->hash_location = (__u8 *)ptr; in process_tcp_ao_options()
653 * Luckily we can at least blame others for their non-compliance but from
654 * inter-operability perspective it seems that we're somewhat stuck with
667 __be32 *ptr = (__be32 *)(th + 1); in tcp_options_write()
668 u16 options = opts->options; /* mungable copy */ in tcp_options_write()
674 opts->hash_location = (__u8 *)ptr; in tcp_options_write()
679 if (unlikely(opts->mss)) { in tcp_options_write()
682 opts->mss); in tcp_options_write()
698 *ptr++ = htonl(opts->tsval); in tcp_options_write()
699 *ptr++ = htonl(opts->tsecr); in tcp_options_write()
713 opts->ws); in tcp_options_write()
716 if (unlikely(opts->num_sack_blocks)) { in tcp_options_write()
717 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
718 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
724 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * in tcp_options_write()
727 for (this_sack = 0; this_sack < opts->num_sack_blocks; in tcp_options_write()
733 tp->rx_opt.dsack = 0; in tcp_options_write()
737 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; in tcp_options_write()
741 if (foc->exp) { in tcp_options_write()
742 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; in tcp_options_write()
747 len = TCPOLEN_FASTOPEN_BASE + foc->len; in tcp_options_write()
752 memcpy(p, foc->val, foc->len); in tcp_options_write()
754 p[foc->len] = TCPOPT_NOP; in tcp_options_write()
755 p[foc->len + 1] = TCPOPT_NOP; in tcp_options_write()
771 if (tp->syn_smc) { in smc_set_option()
773 opts->options |= OPTION_SMC; in smc_set_option()
774 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option()
788 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
790 opts->options |= OPTION_SMC; in smc_set_option_cond()
791 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option_cond()
805 if (mptcp_synack_options(req, &size, &opts->mptcp)) { in mptcp_set_option_cond()
807 opts->options |= OPTION_MPTCP; in mptcp_set_option_cond()
808 *remaining -= size; in mptcp_set_option_cond()
823 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
829 opts->options |= OPTION_MD5; in tcp_syn_options()
830 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_syn_options()
832 timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps); in tcp_syn_options()
834 opts->options |= OPTION_AO; in tcp_syn_options()
835 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_syn_options()
841 * advertised. But we subtract them from tp->mss_cache so that in tcp_syn_options()
848 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
849 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_syn_options()
852 opts->options |= OPTION_TS; in tcp_syn_options()
853 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; in tcp_syn_options()
854 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
855 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_syn_options()
857 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { in tcp_syn_options()
858 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
859 opts->options |= OPTION_WSCALE; in tcp_syn_options()
860 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_syn_options()
862 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { in tcp_syn_options()
863 opts->options |= OPTION_SACK_ADVERTISE; in tcp_syn_options()
864 if (unlikely(!(OPTION_TS & opts->options))) in tcp_syn_options()
865 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_syn_options()
868 if (fastopen && fastopen->cookie.len >= 0) { in tcp_syn_options()
869 u32 need = fastopen->cookie.len; in tcp_syn_options()
871 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_syn_options()
875 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_syn_options()
876 opts->fastopen_cookie = &fastopen->cookie; in tcp_syn_options()
877 remaining -= need; in tcp_syn_options()
878 tp->syn_fastopen = 1; in tcp_syn_options()
879 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
888 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { in tcp_syn_options()
890 opts->options |= OPTION_MPTCP; in tcp_syn_options()
891 remaining -= size; in tcp_syn_options()
898 return MAX_TCP_OPTION_SPACE - remaining; in tcp_syn_options()
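
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The accounting
 * pattern used by tcp_syn_options() and tcp_synack_options(): start from
 * the 40-byte option budget, subtract each option's 4-byte-aligned size if
 * it still fits, and the header ends up carrying budget - remaining bytes
 * of options. The particular option mix below is a hypothetical example.
 */
static unsigned int sketch_syn_option_bytes(void)
{
	unsigned int remaining = 40;	/* MAX_TCP_OPTION_SPACE */

	remaining -= 4;			/* MSS, aligned */
	remaining -= 12;		/* timestamps, aligned */
	remaining -= 4;			/* window scale, aligned */
	/* SACK-permitted rides in the timestamp word: no extra cost,
	 * matching the !(OPTION_TS & opts->options) test above.
	 */
	return 40 - remaining;		/* 20 bytes of options written */
}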
901 /* Set up TCP options for SYN-ACKs. */
915 opts->options |= OPTION_MD5; in tcp_synack_options()
916 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_synack_options()
924 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
926 opts->options |= OPTION_AO; in tcp_synack_options()
927 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_synack_options()
928 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
932 opts->mss = mss; in tcp_synack_options()
933 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_synack_options()
935 if (likely(ireq->wscale_ok)) { in tcp_synack_options()
936 opts->ws = ireq->rcv_wscale; in tcp_synack_options()
937 opts->options |= OPTION_WSCALE; in tcp_synack_options()
938 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_synack_options()
940 if (likely(ireq->tstamp_ok)) { in tcp_synack_options()
941 opts->options |= OPTION_TS; in tcp_synack_options()
942 opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + in tcp_synack_options()
943 tcp_rsk(req)->ts_off; in tcp_synack_options()
944 opts->tsecr = READ_ONCE(req->ts_recent); in tcp_synack_options()
945 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_synack_options()
947 if (likely(ireq->sack_ok)) { in tcp_synack_options()
948 opts->options |= OPTION_SACK_ADVERTISE; in tcp_synack_options()
949 if (unlikely(!ireq->tstamp_ok)) in tcp_synack_options()
950 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_synack_options()
952 if (foc != NULL && foc->len >= 0) { in tcp_synack_options()
953 u32 need = foc->len; in tcp_synack_options()
955 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_synack_options()
959 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_synack_options()
960 opts->fastopen_cookie = foc; in tcp_synack_options()
961 remaining -= need; in tcp_synack_options()
972 return MAX_TCP_OPTION_SPACE - remaining; in tcp_synack_options()
986 opts->options = 0; in tcp_established_options()
990 opts->options |= OPTION_MD5; in tcp_established_options()
993 opts->options |= OPTION_AO; in tcp_established_options()
994 size += tcp_ao_len_aligned(key->ao_key); in tcp_established_options()
997 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
998 opts->options |= OPTION_TS; in tcp_established_options()
999 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + in tcp_established_options()
1000 tp->tsoffset : 0; in tcp_established_options()
1001 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
1012 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1016 &opts->mptcp)) { in tcp_established_options()
1017 opts->options |= OPTION_MPTCP; in tcp_established_options()
1022 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
1024 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1029 opts->num_sack_blocks = in tcp_established_options()
1031 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / in tcp_established_options()
1035 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; in tcp_established_options()
1040 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1044 size = MAX_TCP_OPTION_SPACE - remaining; in tcp_established_options()
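
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] How many SACK
 * blocks fit in what is left of the 40-byte option space: a 4-byte aligned
 * base plus 8 bytes per block. With timestamps (12 bytes) already placed,
 * (40 - 12 - 4) / 8 = 3 blocks at most. Constants are written out here for
 * clarity; the kernel uses TCPOLEN_SACK_BASE_ALIGNED/TCPOLEN_SACK_PERBLOCK.
 */
static unsigned int sketch_sack_blocks(unsigned int eff_sacks,
				       unsigned int remaining)
{
	unsigned int fit;

	if (remaining < 4 + 8)		/* base plus at least one block */
		return 0;
	fit = (remaining - 4) / 8;
	return eff_sacks < fit ? eff_sacks : fit;
}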
1059 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
1073 if ((1 << sk->sk_state) & in tcp_tsq_write()
1078 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
1084 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1094 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
1101 * transferring tsq->head because tcp_wfree() might
1114 list_splice_init(&tsq->head, &list); in tcp_tasklet_func()
1119 list_del(&tp->tsq_node); in tcp_tasklet_func()
1123 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
1136 * tcp_release_cb - tcp release_sock() callback
1144 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); in tcp_release_cb()
1152 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); in tcp_release_cb()
1168 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1183 INIT_LIST_HEAD(&tsq->head); in tcp_tasklet_init()
1184 tasklet_setup(&tsq->tasklet, tcp_tasklet_func); in tcp_tasklet_init()
1195 struct sock *sk = skb->sk; in tcp_wfree()
1204 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
1209 * - less callbacks to tcp_write_xmit(), reducing stress (batches) in tcp_wfree()
1210 * - chance for incoming ACK (processed by another cpu maybe) in tcp_wfree()
1211 * to migrate this flow (skb->ooo_okay will be eventually set) in tcp_wfree()
1213 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
1216 oval = smp_load_acquire(&sk->sk_tsq_flags); in tcp_wfree()
1222 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); in tcp_wfree()
1227 empty = list_empty(&tsq->head); in tcp_wfree()
1228 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
1230 tasklet_schedule(&tsq->tasklet); in tcp_wfree()
1240 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer) in tcp_pace_kick() argument
1242 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); in tcp_pace_kick()
1256 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
1257 unsigned long rate = READ_ONCE(sk->sk_pacing_rate); in tcp_update_skb_after_send()
1260 * Note that tp->data_segs_out overflows after 2^32 packets, in tcp_update_skb_after_send()
1263 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { in tcp_update_skb_after_send()
1264 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); in tcp_update_skb_after_send()
1265 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; in tcp_update_skb_after_send()
1268 len_ns -= min_t(u64, len_ns / 2, credit); in tcp_update_skb_after_send()
1269 tp->tcp_wstamp_ns += len_ns; in tcp_update_skb_after_send()
1272 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
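
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The pacing
 * arithmetic above in isolation: sending len bytes at rate bytes/sec
 * occupies len * NSEC_PER_SEC / rate nanoseconds on the wire, and up to
 * half of any unused "credit" from an earlier early send is refunded
 * before advancing the release clock. Names here are simplified.
 */
static unsigned long long sketch_next_wstamp(unsigned long long wstamp_ns,
					     unsigned long long prior_ns,
					     unsigned int len,
					     unsigned long rate)
{
	unsigned long long len_ns = (unsigned long long)len * 1000000000ULL / rate;
	unsigned long long credit = wstamp_ns - prior_ns;
	unsigned long long refund = credit < len_ns / 2 ? credit : len_ns / 2;

	return wstamp_ns + (len_ns - refund);
}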
1307 prior_wstamp = tp->tcp_wstamp_ns; in __tcp_transmit_skb()
1308 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); in __tcp_transmit_skb()
1309 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in __tcp_transmit_skb()
1321 return -ENOBUFS; in __tcp_transmit_skb()
1322 /* retransmit skbs might have a non zero value in skb->dev in __tcp_transmit_skb()
1323 * because skb->dev is aliased with skb->rbnode.rb_left in __tcp_transmit_skb()
1325 skb->dev = NULL; in __tcp_transmit_skb()
1333 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { in __tcp_transmit_skb()
1341 * and in this case it is better to delay the delivery of 1-MSS in __tcp_transmit_skb()
1345 if (tcp_skb_pcount(skb) > 1) in __tcp_transmit_skb()
1346 tcb->tcp_flags |= TCPHDR_PSH; in __tcp_transmit_skb()
1350 /* We set skb->ooo_okay to one if this packet can select in __tcp_transmit_skb()
1354 * if XPS is enabled, or sk->sk_txhash otherwise. in __tcp_transmit_skb()
1356 * 1) No packet with payload is in qdisc/device queues. in __tcp_transmit_skb()
1363 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || in __tcp_transmit_skb()
1371 skb->pfmemalloc = 0; in __tcp_transmit_skb()
1377 skb->sk = sk; in __tcp_transmit_skb()
1378 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; in __tcp_transmit_skb()
1379 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1381 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); in __tcp_transmit_skb()
1384 th = (struct tcphdr *)skb->data; in __tcp_transmit_skb()
1385 th->source = inet->inet_sport; in __tcp_transmit_skb()
1386 th->dest = inet->inet_dport; in __tcp_transmit_skb()
1387 th->seq = htonl(tcb->seq); in __tcp_transmit_skb()
1388 th->ack_seq = htonl(rcv_nxt); in __tcp_transmit_skb()
1390 tcb->tcp_flags); in __tcp_transmit_skb()
1392 th->check = 0; in __tcp_transmit_skb()
1393 th->urg_ptr = 0; in __tcp_transmit_skb()
1396 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1397 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1398 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1399 th->urg = 1; in __tcp_transmit_skb()
1400 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1401 th->urg_ptr = htons(0xFFFF); in __tcp_transmit_skb()
1402 th->urg = 1; in __tcp_transmit_skb()
1406 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1407 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { in __tcp_transmit_skb()
1408 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1414 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1423 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1433 return -ENOMEM; in __tcp_transmit_skb()
1440 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, in __tcp_transmit_skb()
1444 if (likely(tcb->tcp_flags & TCPHDR_ACK)) in __tcp_transmit_skb()
1447 if (skb->len != tcp_header_size) { in __tcp_transmit_skb()
1449 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1450 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1453 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1457 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1459 /* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */ in __tcp_transmit_skb()
1460 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in __tcp_transmit_skb()
1461 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in __tcp_transmit_skb()
1463 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ in __tcp_transmit_skb()
1466 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in __tcp_transmit_skb()
1471 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, in __tcp_transmit_skb()
1473 sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1490 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1495 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
1503 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1506 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1507 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1515 if (skb->len <= mss_now) { in tcp_set_skb_tso_segs()
1517 * non-TSO case. in tcp_set_skb_tso_segs()
1519 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1520 tcp_skb_pcount_set(skb, 1); in tcp_set_skb_tso_segs()
1521 return 1; in tcp_set_skb_tso_segs()
1523 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
1524 tso_segs = DIV_ROUND_UP(skb->len, mss_now); in tcp_set_skb_tso_segs()
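
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The pcount set
 * above is a plain ceiling division: a 4321-byte skb at mss 1448 becomes
 * (4321 + 1447) / 1448 = 3 segments.
 */
static unsigned int sketch_pcount(unsigned int len, unsigned int mss)
{
	return (len + mss - 1) / mss;	/* DIV_ROUND_UP(len, mss) */
}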
1536 tp->packets_out -= decr; in tcp_adjust_pcount()
1538 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1539 tp->sacked_out -= decr; in tcp_adjust_pcount()
1540 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1541 tp->retrans_out -= decr; in tcp_adjust_pcount()
1542 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1543 tp->lost_out -= decr; in tcp_adjust_pcount()
1547 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1549 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1550 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1551 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_adjust_pcount()
1552 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1559 return TCP_SKB_CB(skb)->txstamp_ack || in tcp_has_tx_tstamp()
1560 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); in tcp_has_tx_tstamp()
1568 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1570 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1572 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1573 shinfo2->tx_flags |= tsflags; in tcp_fragment_tstamp()
1574 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1575 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; in tcp_fragment_tstamp()
1576 TCP_SKB_CB(skb)->txstamp_ack = 0; in tcp_fragment_tstamp()
1582 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; in tcp_skb_fragment_eor()
1583 TCP_SKB_CB(skb)->eor = 0; in tcp_skb_fragment_eor()
1593 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1595 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1614 if (WARN_ON(len > skb->len)) in tcp_fragment()
1615 return -EINVAL; in tcp_fragment()
1624 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); in tcp_fragment()
1625 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1630 return -ENOMEM; in tcp_fragment()
1634 return -ENOMEM; in tcp_fragment()
1639 return -ENOMEM; /* We'll just try again later. */ in tcp_fragment()
1643 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1644 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1645 nlen = skb->len - len; in tcp_fragment()
1646 buff->truesize += nlen; in tcp_fragment()
1647 skb->truesize -= nlen; in tcp_fragment()
1650 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1651 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1652 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1655 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1656 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1657 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1658 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1663 skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC); in tcp_fragment()
1673 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; in tcp_fragment()
1678 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1679 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1690 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); in tcp_fragment()
1707 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1708 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1712 eat -= size; in __pskb_trim_head()
1714 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1716 skb_frag_off_add(&shinfo->frags[k], eat); in __pskb_trim_head()
1717 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1723 shinfo->nr_frags = k; in __pskb_trim_head()
1725 skb->data_len -= len; in __pskb_trim_head()
1726 skb->len = skb->data_len; in __pskb_trim_head()
1736 return -ENOMEM; in tcp_trim_head()
1740 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1742 skb->truesize -= delta_truesize; in tcp_trim_head()
1743 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1747 /* Any change of skb->len requires recalculation of tso factor. */ in tcp_trim_head()
1748 if (tcp_skb_pcount(skb) > 1) in tcp_trim_head()
1762 It is MMS_S - sizeof(tcphdr) of rfc1122 in __tcp_mtu_to_mss()
1764 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1767 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1768 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1771 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1775 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); in __tcp_mtu_to_mss()
1783 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1784 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
1795 tp->tcp_header_len + in tcp_mss_to_mtu()
1796 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1797 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
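
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The two
 * conversions above are inverses around the fixed headers. For plain IPv4
 * with no IP options, no extension headers, and no TCP options:
 * mss = mtu - 20 - 20, so a 1500-byte MTU gives mss 1460, and converting
 * 1460 back lands on 1500. The constants below assume that simple case.
 */
static unsigned int sketch_mtu_to_mss(unsigned int mtu)
{
	return mtu - 20 /* iphdr */ - 20 /* tcphdr */;
}

static unsigned int sketch_mss_to_mtu(unsigned int mss)
{
	return mss + 20 + 20;
}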
1808 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; in tcp_mtup_init()
1809 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1810 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1811 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1812 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1813 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1814 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1820 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT count
1823 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1827 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1829 tp->mss_cache is current effective sending mss, including
1832 tp->rx_opt.mss_clamp.
1837 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1838 are READ ONLY outside this function. --ANK (980731)
1846 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1847 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1853 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1854 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1855 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1856 tp->mss_cache = mss_now; in tcp_sync_mss()
1874 mss_now = tp->mss_cache; in tcp_current_mss()
1878 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1884 /* The mss_cache is sized based on tp->tcp_header_len, which assumes in tcp_current_mss()
1888 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1889 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1890 mss_now -= delta; in tcp_current_mss()
1904 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1905 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1908 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1910 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1911 tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); in tcp_cwnd_application_limited()
1913 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1915 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
1920 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1924 * is fully utilized. If cwnd-limited then remember that fact for the in tcp_cwnd_validate()
1925 * current window. If not cwnd-limited then track the maximum number of in tcp_cwnd_validate()
1926 * outstanding packets in the current window. (If cwnd-limited then we in tcp_cwnd_validate()
1927 * chose to not update tp->max_packets_out to avoid an extra else in tcp_cwnd_validate()
1930 if (!before(tp->snd_una, tp->cwnd_usage_seq) || in tcp_cwnd_validate()
1932 (!tp->is_cwnd_limited && in tcp_cwnd_validate()
1933 tp->packets_out > tp->max_packets_out)) { in tcp_cwnd_validate()
1934 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1935 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1936 tp->cwnd_usage_seq = tp->snd_nxt; in tcp_cwnd_validate()
1941 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1942 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
1945 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1946 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1948 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && in tcp_cwnd_validate()
1949 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1950 !ca_ops->cong_control) in tcp_cwnd_validate()
1955 * 1) just sent some data (see tcp_write_xmit) in tcp_cwnd_validate()
1960 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1961 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1962 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1970 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1971 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1975 * Note that a TSO packet might end with a sub-mss segment
1977 * if ((skb->len % mss) != 0)
1978 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1980 * skb_pcount = skb->len / mss_now
1985 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1986 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1990 * 1. It is full sized. (provided by caller in %partial bool)
2001 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
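
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The Minshall
 * rule used above, in plain terms: defer a sub-MSS segment only while an
 * earlier sub-MSS segment is still unacknowledged. Sequence-number wrap is
 * ignored here, unlike the after()/before() helpers the kernel uses.
 */
static int sketch_defer_small_segment(unsigned int snd_sml,
				      unsigned int snd_una,
				      unsigned int snd_nxt,
				      int segment_is_partial)
{
	int small_in_flight = snd_sml > snd_una && snd_sml <= snd_nxt;

	return segment_is_partial && small_in_flight;
}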
2008 * - For close peers, we rather send bigger packets to reduce
2010 * - For long distance/rtt flows, we would like to get ACK clocking
2011 * with 1 ACK per ms.
2014 * in bigger TSO bursts. We cut the RTT-based allowance in half in tcp_tso_autosize()
2015 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
2024 bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); in tcp_tso_autosize()
2026 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); in tcp_tso_autosize()
2027 if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) in tcp_tso_autosize()
2028 bytes += sk->sk_gso_max_size >> r; in tcp_tso_autosize()
2030 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); in tcp_tso_autosize()
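
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The autosizing
 * above in plain C: take roughly 1 ms worth of bytes at the pacing rate
 * (rate >> 10), add a gso_max_size allowance that halves for every 512 us
 * of min RTT, and cap at gso_max_size. The shift values below are the
 * defaults assumed here (sk_pacing_shift = 10, tcp_tso_rtt_log = 9).
 */
static unsigned long sketch_autosize_bytes(unsigned long pacing_rate,
					   unsigned long gso_max_size,
					   unsigned int min_rtt_us)
{
	unsigned long bytes = pacing_rate >> 10;
	unsigned int r = min_rtt_us >> 9;

	if (r < 32)			/* avoid shifting the value away */
		bytes += gso_max_size >> r;
	return bytes < gso_max_size ? bytes : gso_max_size;
}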
2040 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
2043 min_tso = ca_ops->min_tso_segs ? in tcp_tso_segs()
2044 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
2045 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); in tcp_tso_segs()
2048 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
2061 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
2067 needed = min(skb->len, window); in tcp_mss_split_point()
2078 return needed - partial; in tcp_mss_split_point()
2098 halfcwnd = max(cwnd >> 1, 1U); in tcp_cwnd_test()
2099 return min(halfcwnd, cwnd - in_flight); in tcp_cwnd_test()
2110 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) in tcp_init_tso_segs()
2133 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
2136 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
2147 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
2149 if (skb->len > cur_mss) in tcp_snd_wnd_test()
2150 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
2159 * know that all the data is in scatter-gather pages, and that the
2165 int nlen = skb->len - len; in tso_fragment()
2170 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); in tso_fragment()
2174 return -ENOMEM; in tso_fragment()
2178 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
2179 sk_mem_charge(sk, buff->truesize); in tso_fragment()
2180 buff->truesize += nlen; in tso_fragment()
2181 skb->truesize -= nlen; in tso_fragment()
2184 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
2185 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
2186 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
2189 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
2190 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
2191 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
2226 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
2230 * only if the last write was recent (1 ms). in tcp_tso_should_defer()
2231 * Note that tp->tcp_wstamp_ns can be in the future if we have in tcp_tso_should_defer()
2234 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; in tcp_tso_should_defer()
2240 BUG_ON(tcp_skb_pcount(skb) <= 1); in tcp_tso_should_defer()
2243 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
2246 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
2250 /* If a full-sized TSO skb can be sent, do it. */ in tcp_tso_should_defer()
2251 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
2255 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
2258 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2260 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); in tcp_tso_should_defer()
2274 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
2282 delta = tp->tcp_clock_cache - head->tstamp; in tcp_tso_should_defer()
2284 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) in tcp_tso_should_defer()
2289 * 1) We are cwnd-limited in tcp_tso_should_defer()
2290 * 2) We are rwnd-limited in tcp_tso_should_defer()
2294 if (cong_win <= skb->len) { in tcp_tso_should_defer()
2299 if (send_win <= skb->len) { in tcp_tso_should_defer()
2306 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || in tcp_tso_should_defer()
2307 TCP_SKB_CB(skb)->eor) in tcp_tso_should_defer()
2324 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); in tcp_mtu_check_reprobe()
2325 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2330 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2331 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2333 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2334 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2337 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2347 if (len <= skb->len) in tcp_can_coalesce_send_queue_head()
2353 len -= skb->len; in tcp_can_coalesce_send_queue_head()
2362 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; in tcp_clone_payload()
2366 if (!sk_wmem_schedule(sk, to->truesize + probe_size)) in tcp_clone_payload()
2367 return -ENOMEM; in tcp_clone_payload()
2369 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_clone_payload()
2370 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; in tcp_clone_payload()
2373 return -EINVAL; in tcp_clone_payload()
2375 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { in tcp_clone_payload()
2379 probe_size - len); in tcp_clone_payload()
2389 return -E2BIG; in tcp_clone_payload()
2402 skb_shinfo(to)->nr_frags = nr_frags; in tcp_clone_payload()
2403 to->truesize += probe_size; in tcp_clone_payload()
2404 to->len += probe_size; in tcp_clone_payload()
2405 to->data_len += probe_size; in tcp_clone_payload()
2418 TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags; in tcp_eat_one_skb()
2419 TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor; in tcp_eat_one_skb()
2431 * 1 if a probe was sent,
2432 * -1 otherwise
2451 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2452 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2453 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2455 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2456 return -1; in tcp_mtu_probe()
2459 * and current mss_clamp. if (search_high - search_low) in tcp_mtu_probe()
2463 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2464 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2465 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2466 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2468 * and then reprobe timer has expired. We stick with current in tcp_mtu_probe()
2471 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2472 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { in tcp_mtu_probe()
2477 return -1; in tcp_mtu_probe()
2481 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2482 return -1; in tcp_mtu_probe()
2484 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2485 return -1; in tcp_mtu_probe()
2486 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2492 return -1; in tcp_mtu_probe()
2498 return -1; in tcp_mtu_probe()
2503 return -1; in tcp_mtu_probe()
2509 return -1; in tcp_mtu_probe()
2511 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2512 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2518 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
2519 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
2520 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
2527 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
2529 if (skb->len <= copy) { in tcp_mtu_probe()
2532 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
2536 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2544 tcp_init_tso_segs(nskb, nskb->len); in tcp_mtu_probe()
2547 * be resegmented into mss-sized pieces by tcp_write_xmit(). in tcp_mtu_probe()
2549 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { in tcp_mtu_probe()
2552 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); in tcp_mtu_probe()
2555 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2556 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2557 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2559 return 1; in tcp_mtu_probe()
2562 return -1; in tcp_mtu_probe()
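
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The probe
 * sizing earlier in tcp_mtu_probe() is a binary search on MTU: probe the
 * midpoint of [search_low, search_high], and require enough queued data to
 * cover the probe plus reordering slack. The header constant below assumes
 * plain IPv4 with no options; names are hypothetical.
 */
static unsigned int sketch_probe_size(unsigned int search_low,
				      unsigned int search_high,
				      unsigned int reordering,
				      unsigned int mss_cache,
				      unsigned int *size_needed)
{
	unsigned int probe_mtu = (search_low + search_high) / 2;
	unsigned int probe_mss = probe_mtu - 40;	/* IPv4 + TCP headers */

	*size_needed = probe_mss + (reordering + 1) * mss_cache;
	return probe_mss;
}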
2572 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) in tcp_pacing_check()
2575 if (!hrtimer_is_queued(&tp->pacing_timer)) { in tcp_pacing_check()
2576 hrtimer_start(&tp->pacing_timer, in tcp_pacing_check()
2577 ns_to_ktime(tp->tcp_wstamp_ns), in tcp_pacing_check()
2586 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; in tcp_rtx_queue_empty_or_single_skb()
2593 return !node->rb_left && !node->rb_right; in tcp_rtx_queue_empty_or_single_skb()
2597 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
2600 * - better RTT estimation and ACK scheduling
2601 * - faster recovery
2602 * - high rates
2613 2 * skb->truesize, in tcp_small_queue_check()
2614 READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2615 if (sk->sk_pacing_status == SK_PACING_NONE) in tcp_small_queue_check()
2617 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); in tcp_small_queue_check()
2621 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2622 u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * in tcp_small_queue_check()
2623 tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2626 * approximate our needs assuming an ~100% skb->truesize overhead. in tcp_small_queue_check()
2630 extra_bytes >>= (20 - 1); in tcp_small_queue_check()
2633 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2642 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2648 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
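
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The TSQ limit
 * computed above: allow roughly 1 ms of bytes at the pacing rate in
 * qdisc/device queues, but never less than two skbs, and clamp by
 * tcp_limit_output_bytes when no pacing is in effect. The shift below is
 * an assumed default.
 */
static unsigned long sketch_tsq_limit(unsigned long pacing_rate,
				      unsigned int skb_truesize,
				      unsigned long limit_output_bytes,
				      int pacing_enabled)
{
	unsigned long limit = pacing_rate >> 10;	/* ~1 ms worth */

	if (limit < 2UL * skb_truesize)
		limit = 2UL * skb_truesize;
	if (!pacing_enabled && limit > limit_output_bytes)
		limit = limit_output_bytes;
	return limit;
}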
2657 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2660 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2661 tp->chrono_start = now; in tcp_chrono_set()
2662 tp->chrono_type = new; in tcp_chrono_set()
2674 if (type > tp->chrono_type) in tcp_chrono_start()
2692 else if (type == tp->chrono_type) in tcp_chrono_stop()
2701 struct sk_buff *next_skb = skb->next; in tcp_grow_skb()
2710 nlen = min_t(u32, amount, next_skb->len); in tcp_grow_skb()
2714 TCP_SKB_CB(skb)->end_seq += nlen; in tcp_grow_skb()
2715 TCP_SKB_CB(next_skb)->seq += nlen; in tcp_grow_skb()
2717 if (!next_skb->len) { in tcp_grow_skb()
2719 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_grow_skb()
2730 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2758 sent_pkts = 1; in tcp_write_xmit()
2767 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2768 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ in tcp_write_xmit()
2769 tp->tcp_wstamp_ns = tp->tcp_clock_cache; in tcp_write_xmit()
2770 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in tcp_write_xmit()
2771 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_write_xmit()
2783 cwnd_quota = 1; in tcp_write_xmit()
2788 missing_bytes = cwnd_quota * mss_now - skb->len; in tcp_write_xmit()
2799 if (tso_segs == 1) { in tcp_write_xmit()
2812 if (tso_segs > 1 && !tcp_urg_mode(tp)) in tcp_write_xmit()
2817 if (skb->len > limit && in tcp_write_xmit()
2826 * We do not want to send a pure-ack packet and have in tcp_write_xmit()
2829 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) in tcp_write_xmit()
2832 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
2859 tp->prr_out += sent_pkts; in tcp_write_xmit()
2866 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2879 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_schedule_loss_probe()
2882 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); in tcp_schedule_loss_probe()
2887 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
2888 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
2889 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
2896 if (tp->srtt_us) { in tcp_schedule_loss_probe()
2897 timeout_us = tp->srtt_us >> 2; in tcp_schedule_loss_probe()
2898 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2909 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
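
/*
 * [Editor's illustrative sketch, not part of tcp_output.c] The TLP timeout
 * above: tp->srtt_us stores 8x the smoothed RTT, so srtt_us >> 2 is
 * 2 * SRTT; a lone in-flight packet gets extra headroom for a delayed ACK.
 * The headroom and fallback values below are assumptions for illustration.
 */
static unsigned int sketch_tlp_timeout_us(unsigned int srtt_us_shifted3,
					  unsigned int packets_out)
{
	unsigned int timeout_us;

	if (!srtt_us_shifted3)
		return 1000000;			/* no RTT sample: ~1 s */
	timeout_us = srtt_us_shifted3 >> 2;	/* 2 * SRTT */
	timeout_us += packets_out == 1 ? 200000	/* delayed-ACK headroom */
				       : 2000;	/* small floor */
	return timeout_us;
}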
2926 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in skb_still_in_host_queue()
2948 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2951 tp->tlp_retrans = 0; in tcp_send_loss_probe()
2954 pcount = tp->packets_out; in tcp_send_loss_probe()
2956 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2960 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2962 tcp_warn_once(sk, tp->packets_out, "invalid inflight: "); in tcp_send_loss_probe()
2963 smp_store_release(&inet_csk(sk)->icsk_pending, 0); in tcp_send_loss_probe()
2974 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2976 (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2985 if (__tcp_retransmit_skb(sk, skb, 1)) in tcp_send_loss_probe()
2988 tp->tlp_retrans = 1; in tcp_send_loss_probe()
2992 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2995 /* Reset s.t. tcp_rearm_rto will restart timer from now */ in tcp_send_loss_probe()
2996 smp_store_release(&inet_csk(sk)->icsk_pending, 0); in tcp_send_loss_probe()
3012 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
3021 * true push pending frames to setup probe timer etc.
3027 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
3029 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
3035 * 1. The window can never be shrunk once it is offered (RFC 793)
3041 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
3047 * since header prediction assumes th->window stays fixed.
3049 * Strictly speaking, keeping th->window fixed violates the receiver
3059 * If the free space is less than the 1/4 of the maximum
3060 * space available and the free space is less than 1/2 mss,
3062 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
3093 * fluctuations. --SAW 1998/11/1 in __tcp_select_window()
3095 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
3103 full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
3112 * a non-zero scaling factor in effect. in __tcp_select_window()
3114 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) in __tcp_select_window()
3119 if (free_space < (full_space >> 1)) { in __tcp_select_window()
3120 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3128 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3130 /* if free space is less than mss estimate, or is below 1/16th in __tcp_select_window()
3131 * of the maximum allowed, try to move to zero-window, else in __tcp_select_window()
3141 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
3142 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3147 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
3152 * 1<<rcv_wscale > mss. in __tcp_select_window()
3154 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3156 window = tp->rcv_wnd; in __tcp_select_window()
3159 * If our current window offering is within 1 mss of the in __tcp_select_window()
3165 if (window <= free_space - mss || window > free_space) in __tcp_select_window()
3168 free_space > window + (full_space >> 1)) in __tcp_select_window()
3176 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3178 if (free_space < (full_space >> 1)) { in __tcp_select_window()
3179 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3186 free_space < (1 << tp->rx_opt.rcv_wscale)) in __tcp_select_window()
3190 if (free_space > tp->rcv_ssthresh) { in __tcp_select_window()
3191 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3196 * the memory-based limit, and rcv_ssthresh is not a hard limit in __tcp_select_window()
3199 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3213 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_skb_collapse_tstamp()
3214 shinfo->tskey = next_shinfo->tskey; in tcp_skb_collapse_tstamp()
3215 TCP_SKB_CB(skb)->txstamp_ack |= in tcp_skb_collapse_tstamp()
3216 TCP_SKB_CB(next_skb)->txstamp_ack; in tcp_skb_collapse_tstamp()
3227 next_skb_size = next_skb->len; in tcp_collapse_retrans()
3229 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); in tcp_collapse_retrans()
3231 if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size)) in tcp_collapse_retrans()
3237 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
3240 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
3245 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
3246 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; in tcp_collapse_retrans()
3250 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
3251 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
3264 if (tcp_skb_pcount(skb) > 1) in tcp_can_collapse()
3271 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
3287 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) in tcp_retrans_try_collapse()
3289 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
3299 space -= skb->len; in tcp_retrans_try_collapse()
3309 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
3318 * state updates are done by the caller. Returns non-zero if an
3330 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
3331 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
3334 return -EBUSY; in __tcp_retransmit_skb()
3337 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
3338 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in __tcp_retransmit_skb()
3339 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; in __tcp_retransmit_skb()
3340 TCP_SKB_CB(skb)->seq++; in __tcp_retransmit_skb()
3343 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
3344 WARN_ON_ONCE(1); in __tcp_retransmit_skb()
3345 return -EINVAL; in __tcp_retransmit_skb()
3347 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
3348 return -ENOMEM; in __tcp_retransmit_skb()
3351 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
3352 return -EHOSTUNREACH; /* Routing failure or similar. */ in __tcp_retransmit_skb()
3355 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in __tcp_retransmit_skb()
3363 if (TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
3364 return -EAGAIN; in __tcp_retransmit_skb()
3374 if (skb->len > len) { in __tcp_retransmit_skb()
3377 return -ENOMEM; /* We'll try again later. */ in __tcp_retransmit_skb()
3380 return -ENOMEM; in __tcp_retransmit_skb()
3384 diff -= tcp_skb_pcount(skb); in __tcp_retransmit_skb()
3388 if (skb->len < avail_wnd) in __tcp_retransmit_skb()
3393 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
3399 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
3401 tp->total_retrans += segs; in __tcp_retransmit_skb()
3402 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
3404 /* make sure skb->data is aligned on arches that require it in __tcp_retransmit_skb()
3405 * and check if ack-trimming & collapsing extended the headroom in __tcp_retransmit_skb()
3408 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
3415 nskb->dev = NULL; in __tcp_retransmit_skb()
3418 err = -ENOBUFS; in __tcp_retransmit_skb()
3423 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3427 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
3432 TCP_SKB_CB(skb)->seq, segs, err); in __tcp_retransmit_skb()
3436 } else if (err != -EBUSY) { in __tcp_retransmit_skb()
3443 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
3455 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
3459 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
3460 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3464 if (!tp->retrans_stamp) in tcp_retransmit_skb()
3465 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); in tcp_retransmit_skb()
3467 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
3468 tp->undo_retrans = 0; in tcp_retransmit_skb()
3469 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3487 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
3491 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
3502 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3504 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3507 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
3513 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3521 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3530 if (tcp_small_queue_check(sk, skb, 1)) in tcp_xmit_retransmit_queue()
3539 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3542 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3548 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
3563 delta = size - sk->sk_forward_alloc; in sk_forced_mem_schedule()
3570 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3571 mem_cgroup_charge_skmem(sk->sk_memcg, amt, in sk_forced_mem_schedule()
3590 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3593 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
3594 TCP_SKB_CB(tskb)->end_seq++; in tcp_send_fin()
3595 tp->write_seq++; in tcp_send_fin()
3599 * We need to set tp->snd_nxt to the value it would have in tcp_send_fin()
3601 * does not change tp->snd_nxt. in tcp_send_fin()
3603 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); in tcp_send_fin()
3613 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_send_fin()
3615 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3617 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
3627 * by RFC 2525, section 2.17. -DaveM
3658 /* Send a crossed SYN-ACK during socket establishment.
3669 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
3671 return -EFAULT; in tcp_send_synack()
3673 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
3681 return -ENOMEM; in tcp_send_synack()
3682 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); in tcp_send_synack()
3686 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3687 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3688 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3692 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
3695 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
3699 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3744 * sk->sk_wmem_alloc in an atomic, we can promote to rw. in tcp_make_synack()
3756 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) in tcp_make_synack()
3763 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ in tcp_make_synack()
3764 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); in tcp_make_synack()
3773 u8 keyid = tcp_rsk(req)->ao_keyid; in tcp_make_synack()
3774 u8 rnext = tcp_rsk(req)->ao_rcv_next; in tcp_make_synack()
3776 ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), in tcp_make_synack()
3777 keyid, -1); in tcp_make_synack()
3778 /* If there is no matching key - avoid sending anything, in tcp_make_synack()
3780 * for another peer-matching key, but the peer has requested in tcp_make_synack()
3787 … net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", in tcp_make_synack()
3796 key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk, in tcp_make_synack()
3802 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); in tcp_make_synack()
3804 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; in tcp_make_synack()
3812 th = (struct tcphdr *)skb->data; in tcp_make_synack()
3814 th->syn = 1; in tcp_make_synack()
3815 th->ack = 1; in tcp_make_synack()
3817 th->source = htons(ireq->ir_num); in tcp_make_synack()
3818 th->dest = ireq->ir_rmt_port; in tcp_make_synack()
3819 skb->mark = ireq->ir_mark; in tcp_make_synack()
3820 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_make_synack()
3821 th->seq = htonl(tcp_rsk(req)->snt_isn); in tcp_make_synack()
3823 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); in tcp_make_synack()
3826 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); in tcp_make_synack()
3828 th->doff = (tcp_header_size >> 2); in tcp_make_synack()
3831 /* Okay, we have all we need - do the md5 hash if needed */ in tcp_make_synack()
3834 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, in tcp_make_synack()
3839 tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location, in tcp_make_synack()
3841 opts.hash_location - (u8 *)th, 0); in tcp_make_synack()
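
/* Userspace sketch of the header fill above, using the glibc struct
 * tcphdr rather than kernel types.  The field mapping is the point:
 * ports come from the request sock, seq is our chosen ISN, ack_seq
 * echoes the peer's ISN + 1, and the raw window field is capped at
 * 65535 because window scaling never applies to the window carried in
 * a SYN-ACK itself.  All values passed in are illustrative.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>

static void fill_synack(struct tcphdr *th, uint16_t lport, uint16_t rport,
			uint32_t isn, uint32_t peer_isn, uint32_t rcv_wnd)
{
	memset(th, 0, sizeof(*th));
	th->syn     = 1;
	th->ack     = 1;
	th->source  = htons(lport);
	th->dest    = htons(rport);
	th->seq     = htonl(isn);
	th->ack_seq = htonl(peer_isn + 1);      /* rcv_nxt after their SYN */
	th->window  = htons(rcv_wnd > 65535 ? 65535 : rcv_wnd);
	th->doff    = sizeof(*th) / 4;          /* no options in this sketch */
}
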
3869 if (likely(ca && bpf_try_module_get(ca, ca->owner))) { in tcp_ca_dst_init()
3870 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3871 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3872 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
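
/* Sketch of the reference-swap order in tcp_ca_dst_init(): take a
 * reference on the incoming congestion-control module before dropping
 * the one currently installed, so the socket never holds its current
 * ops with zero references.  Plain counters stand in for module
 * refcounts; the real code uses bpf_try_module_get()/bpf_module_put().
 */
#include <stdbool.h>

struct ca_ops { int refcnt; };

static bool ca_try_get(struct ca_ops *ops) { ops->refcnt++; return true; }
static void ca_put(struct ca_ops *ops)     { ops->refcnt--; }

static void swap_ca_ops(struct ca_ops **cur, struct ca_ops *next)
{
	if (ca_try_get(next)) { /* get the new one first */
		ca_put(*cur);   /* only then release the old one */
		*cur = next;
	}
}
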
3888 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
3889 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) in tcp_connect_init()
3890 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
3895 if (tp->rx_opt.user_mss) in tcp_connect_init()
3896 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3897 tp->max_window = 0; in tcp_connect_init()
3903 if (!tp->window_clamp) in tcp_connect_init()
3904 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); in tcp_connect_init()
3905 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
3910 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3911 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3912 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); in tcp_connect_init()
3919 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3920 &tp->rcv_wnd, in tcp_connect_init()
3921 &tp->window_clamp, in tcp_connect_init()
3922 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), in tcp_connect_init()
3926 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3927 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3929 WRITE_ONCE(sk->sk_err, 0); in tcp_connect_init()
3931 tp->snd_wnd = 0; in tcp_connect_init()
3934 tp->snd_una = tp->write_seq; in tcp_connect_init()
3935 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3936 tp->snd_up = tp->write_seq; in tcp_connect_init()
3937 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect_init()
3939 if (likely(!tp->repair)) in tcp_connect_init()
3940 tp->rcv_nxt = 0; in tcp_connect_init()
3942 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
3943 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3944 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_connect_init()
3946 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3947 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
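
/* Sketch of the sender-side invariant tcp_connect_init() establishes:
 * every sequence tracker collapses onto the initial write_seq, so the
 * first byte transmitted advances them together.  Field names mirror
 * the tcp_sock members above, but the struct itself is illustrative.
 */
#include <assert.h>

struct snd_state {
	unsigned int write_seq, snd_una, snd_sml, snd_up, snd_nxt;
};

static void snd_state_init(struct snd_state *s, unsigned int isn)
{
	s->write_seq = isn;
	s->snd_una = s->snd_sml = s->snd_up = s->snd_nxt = isn;
}

int main(void)
{
	struct snd_state s;

	snd_state_init(&s, 12345u);
	assert(s.snd_una == s.snd_nxt && s.snd_nxt == s.write_seq);
	return 0;
}
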
3956 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3958 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
3959 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3960 WRITE_ONCE(tp->write_seq, tcb->end_seq); in tcp_connect_queue_skb()
3961 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3965 * queue a data-only packet after the regular SYN, such that regular SYNs
3966 * are retransmitted on timeouts; if the remote SYN-ACK acknowledges only the SYN sequence, the data is retransmitted in the first ACK. in tcp_send_syn_data()
3975 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3980 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3981 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3984 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and in tcp_send_syn_data()
3985 * user-MSS. Reserve maximum option space for middleboxes that add in tcp_send_syn_data()
3988 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
3990 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
3992 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
3995 space = min_t(size_t, space, fo->size); in tcp_send_syn_data()
3999 pfrag, sk->sk_allocation)) in tcp_send_syn_data()
4001 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); in tcp_send_syn_data()
4004 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); in tcp_send_syn_data()
4006 space = min_t(size_t, space, pfrag->size - pfrag->offset); in tcp_send_syn_data()
4010 space = copy_page_from_iter(pfrag->page, pfrag->offset, in tcp_send_syn_data()
4011 space, &fo->data->msg_iter); in tcp_send_syn_data()
4017 skb_fill_page_desc(syn_data, 0, pfrag->page, in tcp_send_syn_data()
4018 pfrag->offset, space); in tcp_send_syn_data()
4019 page_ref_inc(pfrag->page); in tcp_send_syn_data()
4020 pfrag->offset += space; in tcp_send_syn_data()
4022 skb_zcopy_set(syn_data, fo->uarg, NULL); in tcp_send_syn_data()
4025 if (space == fo->size) in tcp_send_syn_data()
4026 fo->data = NULL; in tcp_send_syn_data()
4027 fo->copied = space; in tcp_send_syn_data()
4030 if (syn_data->len) in tcp_send_syn_data()
4033 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
4035 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC); in tcp_send_syn_data()
4042 TCP_SKB_CB(syn_data)->seq++; in tcp_send_syn_data()
4043 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; in tcp_send_syn_data()
4045 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
4046 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
4052 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
4053 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
4057 if (fo->cookie.len > 0) in tcp_send_syn_data()
4058 fo->cookie.len = 0; in tcp_send_syn_data()
4059 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
4061 tp->syn_fastopen = 0; in tcp_send_syn_data()
4063 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ in tcp_send_syn_data()
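
/* Control-flow sketch of the Fast Open fallback above, with made-up
 * return values rather than kernel APIs: prefer SYN+data when a cached
 * cookie exists and allocation succeeds, degrade to a bare SYN that
 * requests a cookie when none is cached, and after a failed transmit
 * retransmitted SYNs exclude the option entirely (cookie.len = -1).
 */
enum syn_kind {
	SYN_DATA,               /* data rides on the SYN (TFO) */
	SYN_WITH_COOKIE_REQ,    /* plain SYN asking for a cookie */
	SYN_PLAIN,              /* give up on TFO for this attempt */
};

static enum syn_kind choose_syn(int have_cookie, int alloc_ok)
{
	if (have_cookie && alloc_ok)
		return SYN_DATA;
	if (!have_cookie)
		return SYN_WITH_COOKIE_REQ;
	return SYN_PLAIN;
}
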
4078 * Return an error if the peer has both an md5 and a tcp-ao key. in tcp_connect()
4081 if (unlikely(rcu_dereference_protected(tp->md5sig_info, in tcp_connect()
4083 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); in tcp_connect()
4084 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); in tcp_connect()
4087 ao_info = rcu_dereference_check(tp->ao_info, in tcp_connect()
4094 needs_ao |= ao_info->ao_required; in tcp_connect()
4095 WARN_ON_ONCE(ao_info->ao_required && needs_md5); in tcp_connect()
4098 return -EKEYREJECTED; in tcp_connect()
4100 /* If we have a matching md5 key and no matching tcp-ao key in tcp_connect()
4107 kfree(rcu_replace_pointer(tp->md5sig_info, NULL, in tcp_connect()
4113 if (unlikely(rcu_dereference_protected(tp->ao_info, in tcp_connect()
4118 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) in tcp_connect()
4119 return -EKEYREJECTED; in tcp_connect()
4123 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
4124 return -EHOSTUNREACH; /* Routing failure or similar. */ in tcp_connect()
4128 if (unlikely(tp->repair)) { in tcp_connect()
4133 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); in tcp_connect()
4135 return -ENOBUFS; in tcp_connect()
4140 tcp_init_nondata_skb(buff, tp->write_seq, TCPHDR_SYN); in tcp_connect()
4142 tp->retrans_stamp = tcp_time_stamp_ts(tp); in tcp_connect()
4145 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
4148 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
4149 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
4150 if (err == -ECONNREFUSED) in tcp_connect()
4153 /* We change tp->snd_nxt after the tcp_transmit_skb() call in tcp_connect()
4156 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect()
4157 tp->pushed_seq = tp->write_seq; in tcp_connect()
4160 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); in tcp_connect()
4161 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
4165 /* Timer for repeating the SYN until an answer. */ in tcp_connect()
4167 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
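
/* Sketch of the retransmission schedule tcp_connect() arms above: the
 * SYN timer fires after the initial RTO and doubles per retry, clamped
 * at a ceiling (TCP_RTO_MAX in the kernel; the millisecond values here
 * are illustrative defaults, not read from the kernel).
 */
#include <stdio.h>

int main(void)
{
	unsigned int rto_ms = 1000, rto_max_ms = 120000;

	for (int retry = 0; retry < 8; retry++) {
		printf("SYN attempt %d, next timeout %u ms\n", retry, rto_ms);
		rto_ms = rto_ms * 2 > rto_max_ms ? rto_max_ms : rto_ms * 2;
	}
	return 0;
}
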
4174 u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; in tcp_delack_max()
4176 return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min); in tcp_delack_max()
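
/* Sketch of the bound computed in tcp_delack_max() above: the
 * delayed-ACK ceiling is kept just below the minimum RTO, so a delayed
 * ACK tends to leave before a retransmit timer based on that RTO can
 * fire.  Plain integers stand in for jiffies.
 */
static unsigned long delack_max(unsigned long icsk_delack_max,
				unsigned long rto_min)
{
	unsigned long from_rto_min = (rto_min > 2 ? rto_min : 2) - 1;

	return icsk_delack_max < from_rto_min ? icsk_delack_max : from_rto_min;
}
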
4186 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
4194 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
4200 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements in tcp_send_delayed_ack()
4203 if (tp->srtt_us) { in tcp_send_delayed_ack()
4204 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
4220 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
4221 /* If delack timer is about to expire, send ACK now. */ in tcp_send_delayed_ack()
4222 if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
4227 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
4228 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
4230 smp_store_release(&icsk->icsk_ack.pending, in tcp_send_delayed_ack()
4231 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); in tcp_send_delayed_ack()
4232 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
4233 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
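
/* Sketch of the timer-merge rule in tcp_send_delayed_ack(): an ACK is
 * sent immediately if the already-armed timer is within ato/4 of
 * expiry; otherwise the earlier of the armed deadline and now + ato
 * wins, so rearming never pushes a pending delack later.  Plain
 * integers stand in for jiffies.
 */
#include <stdbool.h>

static bool delack_send_now(unsigned long now, unsigned long armed,
			    unsigned long ato)
{
	return armed <= now + (ato >> 2);       /* about to fire: ACK now */
}

static unsigned long delack_deadline(unsigned long now, unsigned long armed,
				     unsigned long ato, bool timer_armed)
{
	unsigned long timeout = now + ato;

	if (timer_armed && armed < timeout)
		timeout = armed;        /* keep the earlier deadline */
	return timeout;
}
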
4242 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
4255 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; in __tcp_send_ack()
4257 icsk->icsk_ack.retry++; in __tcp_send_ack()
4259 icsk->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
4270 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 in __tcp_send_ack()
4281 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
4291 * Current solution: to send TWO zero-length segments in urgent mode:
4293 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and one out-of-date with SND.UNA-1 to probe the window.
4304 return -1; in tcp_xmit_probe_skb()
4312 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
4320 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
4321 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
4327 /* Initiate keepalive or window probe from timer. */
4333 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
4334 return -1; in tcp_write_wakeup()
4337 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
4340 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
4342 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
4343 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
4349 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
4350 skb->len > mss) { in tcp_write_wakeup()
4352 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4355 return -1; in tcp_write_wakeup()
4359 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4360 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
4365 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
4366 tcp_xmit_probe_skb(sk, 1, mib); in tcp_write_wakeup()
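
/* Decision sketch for tcp_write_wakeup() above: when queued data fits
 * (even partially) in the advertised window it is trimmed to the
 * window or MSS and sent with PSH; otherwise a zero-length probe
 * carries SND.UNA-1, an already-ACKed sequence number that forces the
 * peer to respond.  Helpers and names are illustrative.
 */
#include <stdbool.h>

static bool must_trim_before_send(unsigned int seg_len, unsigned int window,
				  unsigned int mss)
{
	/* Segment exceeds the window or the current MSS: fragment first. */
	return seg_len > window || seg_len > mss;
}

static unsigned int probe_seq(unsigned int snd_una, bool urgent)
{
	/* Matches tp->snd_una - !urgent in tcp_xmit_probe_skb() above. */
	return urgent ? snd_una : snd_una - 1;
}
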
4384 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
4385 /* Cancel probe timer, if it is not required. */ in tcp_send_probe0()
4386 icsk->icsk_probes_out = 0; in tcp_send_probe0()
4387 icsk->icsk_backoff = 0; in tcp_send_probe0()
4388 icsk->icsk_probes_tstamp = 0; in tcp_send_probe0()
4392 icsk->icsk_probes_out++; in tcp_send_probe0()
4394 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) in tcp_send_probe0()
4395 icsk->icsk_backoff++; in tcp_send_probe0()
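
/* Backoff sketch for the zero-window probe timer rearmed after the
 * code above: the interval doubles with each unanswered probe
 * (rto << icsk_backoff) and is clamped to a maximum, mirroring the
 * kernel's tcp_probe0_when() calculation.
 */
static unsigned long probe0_when(unsigned long rto, unsigned int backoff,
				 unsigned long max_when)
{
	unsigned long when = rto << backoff;    /* exponential backoff */

	return when > max_when ? max_when : when;
}
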
4410 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; in tcp_rtx_synack()
4415 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) in tcp_rtx_synack()
4416 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); in tcp_rtx_synack()
4417 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, in tcp_rtx_synack()
4427 tcp_sk_rw(sk)->total_retrans++; in tcp_rtx_synack()
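
/* Sketch of the rehash-on-retransmit idea in tcp_rtx_synack(): with
 * SOCK_TXREHASH enabled, a retried SYN-ACK picks a fresh flow hash,
 * which can steer it onto a different ECMP path when the original one
 * is misbehaving.  random() stands in for the kernel's RNG; zero is
 * avoided because an unset hash is conventionally 0.
 */
#include <stdlib.h>

static unsigned int pick_new_txhash(void)
{
	unsigned int h = (unsigned int)random();

	return h ? h : 1;
}
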