Lines Matching +full:non +full:- +full:urgent
1 // SPDX-License-Identifier: GPL-2.0-only
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
33 * Cacophonix Gaul : draft-minshall-nagle-01
61 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
62 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
73 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
75 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
77 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
78 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
80 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
81 tp->highest_sack = skb; in tcp_event_new_data_sent()
83 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
84 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
95 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
103 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
104 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
105 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
106 return tp->snd_nxt; in tcp_acceptable_seq()
112 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
115 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
129 int mss = tp->advmss; in tcp_advertise_mss()
136 tp->advmss = mss; in tcp_advertise_mss()
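/* Illustrative userspace sketch, not kernel code: the arithmetic behind the
 * advertised MSS described above. It is derived from the local MTU minus the
 * fixed IP and TCP headers, independent of the path MTU, and can never exceed
 * 65535 - 40. The 1500-byte MTU is an assumed example value.
 */
#include <stdio.h>

unsigned int advertised_mss(unsigned int mtu, unsigned int ip_hdr_len)
{
	unsigned int mss = mtu - ip_hdr_len - 20;	/* 20 = bare TCP header */

	if (mss > 65535 - 40)				/* theoretical maximum */
		mss = 65535 - 40;
	return mss;
}

int main(void)
{
	printf("%u\n", advertised_mss(1500, 20));	/* IPv4: 1460 */
	printf("%u\n", advertised_mss(1500, 40));	/* IPv6: 1440 */
	return 0;
}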
154 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
157 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
160 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
161 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
174 tp->lsndtime = now; in tcp_event_data_sent()
179 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
188 if (unlikely(tp->compressed_ack)) { in tcp_event_ack_sent()
190 tp->compressed_ack); in tcp_event_ack_sent()
191 tp->compressed_ack = 0; in tcp_event_ack_sent()
192 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
196 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
229 * we will truncate our initial window offering to 32K-1 in tcp_select_initial_window()
234 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_initial_window()
245 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_select_initial_window()
248 *rcv_wscale = clamp_t(int, ilog2(space) - 15, in tcp_select_initial_window()
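/* Sketch of the wscale choice above, in plain userspace C for illustration
 * only: the raw window field is 16 bits, so a scale of ilog2(space) - 15,
 * clamped to [0, TCP_MAX_WSCALE (14)], is just enough to represent the
 * largest receive buffer the socket may grow to. 6 MB is an assumed example.
 */
#include <stdio.h>

int ilog2_u32(unsigned int v)		/* index of the highest set bit */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int pick_rcv_wscale(unsigned int space)
{
	int ws = ilog2_u32(space) - 15;

	if (ws < 0)
		ws = 0;
	if (ws > 14)
		ws = 14;
	return ws;
}

int main(void)
{
	printf("%d\n", pick_rcv_wscale(6 * 1024 * 1024));	/* -> 7 */
	return 0;
}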
259 * value can be stuffed directly into th->window for an outgoing
266 u32 old_win = tp->rcv_wnd; in tcp_select_window()
272 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) { in tcp_select_window()
273 tp->pred_flags = 0; in tcp_select_window()
274 tp->rcv_wnd = 0; in tcp_select_window()
275 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
285 * window in time. --DaveM in tcp_select_window()
289 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { in tcp_select_window()
293 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
297 tp->rcv_wnd = new_win; in tcp_select_window()
298 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
303 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
304 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_window()
307 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
310 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
314 tp->pred_flags = 0; in tcp_select_window()
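/* Illustration only, not the kernel function: how the clamp-and-shift a few
 * lines above squeezes a byte window into the 16-bit header field once
 * window scaling is in effect; the receiver shifts the field left again.
 */
#include <stdio.h>
#include <stdint.h>

uint16_t window_field(uint32_t win_bytes, unsigned int wscale)
{
	uint32_t max_representable = 65535U << wscale;

	if (win_bytes > max_representable)
		win_bytes = max_representable;
	return (uint16_t)(win_bytes >> wscale);
}

int main(void)
{
	/* 1 MB window, wscale 7: field = 8192, receiver sees 8192 << 7 = 1 MB */
	printf("%u\n", (unsigned int)window_field(1U << 20, 7));
	return 0;
}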
339 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ACCECN; in tcp_ecn_send()
341 /* Not-retransmitted data segment: set ECT and inject CWR. */ in tcp_ecn_send()
342 if (skb->len != tcp_header_len && in tcp_ecn_send()
343 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
345 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
346 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
347 th->cwr = 1; in tcp_ecn_send()
348 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
354 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
355 th->ece = 1; in tcp_ecn_send()
359 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
365 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
367 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
372 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
375 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
380 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
428 if (unlikely(OPTION_MPTCP & opts->options)) in mptcp_options_write()
429 mptcp_write_options(th, ptr, tp, &opts->mptcp); in mptcp_options_write()
475 * Thus, "req" is passed here and the cgroup-bpf-progs in bpf_skops_hdr_opt_len()
480 * consistent between fastopen and non-fastopen on in bpf_skops_hdr_opt_len()
504 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; in bpf_skops_hdr_opt_len()
506 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; in bpf_skops_hdr_opt_len()
508 *remaining -= opts->bpf_opt_len; in bpf_skops_hdr_opt_len()
517 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; in bpf_skops_write_hdr_opt()
541 first_opt_off = tcp_hdrlen(skb) - max_opt_len; in bpf_skops_write_hdr_opt()
549 nr_written = max_opt_len - sock_ops.remaining_opt_len; in bpf_skops_write_hdr_opt()
552 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, in bpf_skops_write_hdr_opt()
553 max_opt_len - nr_written); in bpf_skops_write_hdr_opt()
580 u8 maclen = tcp_ao_maclen(key->ao_key); in process_tcp_ao_options()
586 (tcprsk->ao_keyid << 8) | in process_tcp_ao_options()
587 (tcprsk->ao_rcv_next)); in process_tcp_ao_options()
592 ao_info = rcu_dereference_check(tp->ao_info, in process_tcp_ao_options()
593 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); in process_tcp_ao_options()
594 rnext_key = READ_ONCE(ao_info->rnext_key); in process_tcp_ao_options()
598 (tcp_ao_len(key->ao_key) << 16) | in process_tcp_ao_options()
599 (key->ao_key->sndid << 8) | in process_tcp_ao_options()
600 (rnext_key->rcvid)); in process_tcp_ao_options()
602 opts->hash_location = (__u8 *)ptr; in process_tcp_ao_options()
621 * Luckily we can at least blame others for their non-compliance but from an
622 * inter-operability perspective it seems that we're somewhat stuck with
638 u16 options = opts->options; /* mungable copy */ in tcp_options_write()
644 opts->hash_location = (__u8 *)ptr; in tcp_options_write()
649 if (unlikely(opts->mss)) { in tcp_options_write()
652 opts->mss); in tcp_options_write()
668 *ptr++ = htonl(opts->tsval); in tcp_options_write()
669 *ptr++ = htonl(opts->tsecr); in tcp_options_write()
673 const u32 *ecn_bytes = opts->use_synack_ecn_bytes ? in tcp_options_write()
675 tp->received_ecn_bytes; in tcp_options_write()
676 const u8 ect0_idx = INET_ECN_ECT_0 - 1; in tcp_options_write()
677 const u8 ect1_idx = INET_ECN_ECT_1 - 1; in tcp_options_write()
678 const u8 ce_idx = INET_ECN_CE - 1; in tcp_options_write()
688 opts->num_accecn_fields * TCPOLEN_ACCECN_PERFIELD; in tcp_options_write()
690 if (opts->num_accecn_fields == 2) { in tcp_options_write()
695 } else if (opts->num_accecn_fields == 1) { in tcp_options_write()
700 } else if (opts->num_accecn_fields == 0) { in tcp_options_write()
703 } else if (opts->num_accecn_fields == 3) { in tcp_options_write()
712 tp->accecn_minlen = 0; in tcp_options_write()
713 tp->accecn_opt_tstamp = tp->tcp_mstamp; in tcp_options_write()
714 if (tp->accecn_opt_demand) in tcp_options_write()
715 tp->accecn_opt_demand--; in tcp_options_write()
731 /* Do not split the leftover 2-byte to fit into a single in tcp_options_write()
743 opts->ws); in tcp_options_write()
746 if (unlikely(opts->num_sack_blocks)) { in tcp_options_write()
747 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
748 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
754 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * in tcp_options_write()
759 for (this_sack = 0; this_sack < opts->num_sack_blocks; in tcp_options_write()
765 tp->rx_opt.dsack = 0; in tcp_options_write()
777 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; in tcp_options_write()
781 if (foc->exp) { in tcp_options_write()
782 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; in tcp_options_write()
787 len = TCPOLEN_FASTOPEN_BASE + foc->len; in tcp_options_write()
792 memcpy(p, foc->val, foc->len); in tcp_options_write()
794 p[foc->len] = TCPOPT_NOP; in tcp_options_write()
795 p[foc->len + 1] = TCPOPT_NOP; in tcp_options_write()
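/* Sketch of the 4-byte padding rule visible above (illustration only):
 * option space is measured in 32-bit words via the header's data offset,
 * so a Fast Open option of kind + length + cookie is rounded up with NOP
 * bytes. The 8-byte cookie is an assumed example length.
 */
#include <stdio.h>

unsigned int fastopen_opt_bytes(unsigned int cookie_len)
{
	unsigned int len = 2 + cookie_len;	/* kind + length + cookie */

	return (len + 3) & ~3U;			/* NOP-padded to a multiple of 4 */
}

int main(void)
{
	printf("%u\n", fastopen_opt_bytes(8));	/* 10 bytes -> 12 on the wire */
	return 0;
}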
811 if (tp->syn_smc) { in smc_set_option()
813 opts->options |= OPTION_SMC; in smc_set_option()
814 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option()
828 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
830 opts->options |= OPTION_SMC; in smc_set_option_cond()
831 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option_cond()
845 if (mptcp_synack_options(req, &size, &opts->mptcp)) { in mptcp_set_option_cond()
847 opts->options |= OPTION_MPTCP; in mptcp_set_option_cond()
848 *remaining -= size; in mptcp_set_option_cond()
857 if ((opts->options & (OPTION_SACK_ADVERTISE | OPTION_TS)) == in tcp_synack_options_combine_saving()
860 else if (opts->options & OPTION_WSCALE) in tcp_synack_options_combine_saving()
884 if (opts->use_synack_ecn_bytes) in tcp_options_fit_accecn()
887 max_combine_saving = opts->num_sack_blocks > 0 ? 2 : 0; in tcp_options_fit_accecn()
888 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; in tcp_options_fit_accecn()
889 while (opts->num_accecn_fields >= required) { in tcp_options_fit_accecn()
899 } else if (opts->num_accecn_fields == required && in tcp_options_fit_accecn()
900 opts->num_sack_blocks > 2 && in tcp_options_fit_accecn()
903 opts->num_sack_blocks--; in tcp_options_fit_accecn()
907 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS; in tcp_options_fit_accecn()
912 opts->num_accecn_fields--; in tcp_options_fit_accecn()
913 size -= TCPOLEN_ACCECN_PERFIELD; in tcp_options_fit_accecn()
916 if (opts->num_accecn_fields >= required) in tcp_options_fit_accecn()
917 size -= sack_blocks_reduce * TCPOLEN_SACK_PERBLOCK; in tcp_options_fit_accecn()
919 opts->num_sack_blocks += sack_blocks_reduce; in tcp_options_fit_accecn()
921 if (opts->num_accecn_fields < required) in tcp_options_fit_accecn()
924 opts->options |= OPTION_ACCECN; in tcp_options_fit_accecn()
937 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
943 opts->options |= OPTION_MD5; in tcp_syn_options()
944 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_syn_options()
946 timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps); in tcp_syn_options()
948 opts->options |= OPTION_AO; in tcp_syn_options()
949 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_syn_options()
955 * advertised. But we subtract them from tp->mss_cache so that in tcp_syn_options()
962 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
963 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_syn_options()
966 opts->options |= OPTION_TS; in tcp_syn_options()
967 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; in tcp_syn_options()
968 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
969 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_syn_options()
971 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { in tcp_syn_options()
972 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
973 opts->options |= OPTION_WSCALE; in tcp_syn_options()
974 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_syn_options()
976 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { in tcp_syn_options()
977 opts->options |= OPTION_SACK_ADVERTISE; in tcp_syn_options()
978 if (unlikely(!(OPTION_TS & opts->options))) in tcp_syn_options()
979 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_syn_options()
982 if (fastopen && fastopen->cookie.len >= 0) { in tcp_syn_options()
983 u32 need = fastopen->cookie.len; in tcp_syn_options()
985 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_syn_options()
989 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_syn_options()
990 opts->fastopen_cookie = &fastopen->cookie; in tcp_syn_options()
991 remaining -= need; in tcp_syn_options()
992 tp->syn_fastopen = 1; in tcp_syn_options()
993 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
1002 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { in tcp_syn_options()
1004 opts->options |= OPTION_MPTCP; in tcp_syn_options()
1005 remaining -= size; in tcp_syn_options()
1015 if (unlikely((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) && in tcp_syn_options()
1017 inet_csk(sk)->icsk_retransmits < 2 && in tcp_syn_options()
1018 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && in tcp_syn_options()
1020 opts->use_synack_ecn_bytes = 1; in tcp_syn_options()
1021 remaining -= tcp_options_fit_accecn(opts, 0, remaining); in tcp_syn_options()
1026 return MAX_TCP_OPTION_SPACE - remaining; in tcp_syn_options()
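/* Budget sketch for the SYN option layout built above (illustration only,
 * using the common aligned sizes: 40 bytes of option space, MSS 4,
 * timestamps 12, window scale 4; SACK-permitted is folded into the
 * timestamp block when both are present, as the code above shows).
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 40;	/* MAX_TCP_OPTION_SPACE */

	remaining -= 4;			/* MSS */
	remaining -= 12;		/* timestamps (aligned) */
	remaining -= 4;			/* window scale (aligned) */
	/* SACK-permitted rides in the timestamp padding: no extra bytes */

	printf("used %u, left %u for cookies/MPTCP/AccECN\n",
	       40 - remaining, remaining);	/* used 20, left 20 */
	return 0;
}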
1029 /* Set up TCP options for SYN-ACKs. */
1044 opts->options |= OPTION_MD5; in tcp_synack_options()
1045 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_synack_options()
1053 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
1055 opts->options |= OPTION_AO; in tcp_synack_options()
1056 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_synack_options()
1057 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
1061 opts->mss = mss; in tcp_synack_options()
1062 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_synack_options()
1064 if (likely(ireq->wscale_ok)) { in tcp_synack_options()
1065 opts->ws = ireq->rcv_wscale; in tcp_synack_options()
1066 opts->options |= OPTION_WSCALE; in tcp_synack_options()
1067 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_synack_options()
1069 if (likely(ireq->tstamp_ok)) { in tcp_synack_options()
1070 opts->options |= OPTION_TS; in tcp_synack_options()
1071 opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + in tcp_synack_options()
1072 tcp_rsk(req)->ts_off; in tcp_synack_options()
1073 if (!tcp_rsk(req)->snt_tsval_first) { in tcp_synack_options()
1074 if (!opts->tsval) in tcp_synack_options()
1075 opts->tsval = ~0U; in tcp_synack_options()
1076 tcp_rsk(req)->snt_tsval_first = opts->tsval; in tcp_synack_options()
1078 WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval); in tcp_synack_options()
1079 opts->tsecr = req->ts_recent; in tcp_synack_options()
1080 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_synack_options()
1082 if (likely(ireq->sack_ok)) { in tcp_synack_options()
1083 opts->options |= OPTION_SACK_ADVERTISE; in tcp_synack_options()
1084 if (unlikely(!ireq->tstamp_ok)) in tcp_synack_options()
1085 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_synack_options()
1087 if (foc != NULL && foc->len >= 0) { in tcp_synack_options()
1088 u32 need = foc->len; in tcp_synack_options()
1090 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_synack_options()
1094 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_synack_options()
1095 opts->fastopen_cookie = foc; in tcp_synack_options()
1096 remaining -= need; in tcp_synack_options()
1104 if (treq->accecn_ok && in tcp_synack_options()
1105 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) && in tcp_synack_options()
1106 req->num_timeout < 1 && remaining >= TCPOLEN_ACCECN_BASE) { in tcp_synack_options()
1107 opts->use_synack_ecn_bytes = 1; in tcp_synack_options()
1108 remaining -= tcp_options_fit_accecn(opts, 0, remaining); in tcp_synack_options()
1114 return MAX_TCP_OPTION_SPACE - remaining; in tcp_synack_options()
1128 opts->options = 0; in tcp_established_options()
1132 opts->options |= OPTION_MD5; in tcp_established_options()
1135 opts->options |= OPTION_AO; in tcp_established_options()
1136 size += tcp_ao_len_aligned(key->ao_key); in tcp_established_options()
1139 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
1140 opts->options |= OPTION_TS; in tcp_established_options()
1141 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + in tcp_established_options()
1142 tp->tsoffset : 0; in tcp_established_options()
1143 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
1154 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1158 &opts->mptcp)) { in tcp_established_options()
1159 opts->options |= OPTION_MPTCP; in tcp_established_options()
1164 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
1166 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1169 opts->num_sack_blocks = in tcp_established_options()
1171 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / in tcp_established_options()
1175 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; in tcp_established_options()
1177 opts->num_sack_blocks = 0; in tcp_established_options()
1180 opts->num_sack_blocks = 0; in tcp_established_options()
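/* Sketch of the SACK block budget computed above (illustration only, with
 * the usual sizes: 40 option bytes total, 12 already taken by timestamps,
 * 4 bytes of kind/length/padding for the SACK option, 8 bytes per block).
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 40 - 12;		/* after timestamps */
	unsigned int max_blocks = (remaining - 4) / 8;	/* base + per-block */

	printf("at most %u SACK blocks (%u option bytes)\n",
	       max_blocks, 4 + max_blocks * 8);		/* 3 blocks, 28 bytes */
	return 0;
}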
1184 int ecn_opt = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option); in tcp_established_options()
1186 if (ecn_opt && tp->saw_accecn_opt && !tcp_accecn_opt_fail_send(tp) && in tcp_established_options()
1187 (ecn_opt >= TCP_ACCECN_OPTION_FULL || tp->accecn_opt_demand || in tcp_established_options()
1189 opts->use_synack_ecn_bytes = 0; in tcp_established_options()
1190 size += tcp_options_fit_accecn(opts, tp->accecn_minlen, in tcp_established_options()
1191 MAX_TCP_OPTION_SPACE - size); in tcp_established_options()
1197 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1201 size = MAX_TCP_OPTION_SPACE - remaining; in tcp_established_options()
1216 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
1230 if ((1 << sk->sk_state) & in tcp_tsq_write()
1235 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
1241 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1251 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
1258 * transferring tsq->head because tcp_wfree() might
1259 * interrupt us (non NAPI drivers)
1271 list_splice_init(&tsq->head, &list); in tcp_tsq_workfn()
1276 list_del(&tp->tsq_node); in tcp_tsq_workfn()
1280 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tsq_workfn()
1293 * tcp_release_cb - tcp release_sock() callback
1301 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); in tcp_release_cb()
1309 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); in tcp_release_cb()
1325 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1340 INIT_LIST_HEAD(&tsq->head); in tcp_tsq_work_init()
1341 INIT_WORK(&tsq->work, tcp_tsq_workfn); in tcp_tsq_work_init()
1352 struct sock *sk = skb->sk; in tcp_wfree()
1361 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
1366 * - less callbacks to tcp_write_xmit(), reducing stress (batches) in tcp_wfree()
1367 * - chance for incoming ACK (processed by another cpu maybe) in tcp_wfree()
1368 * to migrate this flow (skb->ooo_okay will be eventually set) in tcp_wfree()
1370 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
1373 oval = smp_load_acquire(&sk->sk_tsq_flags); in tcp_wfree()
1379 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); in tcp_wfree()
1384 empty = list_empty(&tsq->head); in tcp_wfree()
1385 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
1387 queue_work(system_bh_wq, &tsq->work); in tcp_wfree()
1413 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
1414 unsigned long rate = READ_ONCE(sk->sk_pacing_rate); in tcp_update_skb_after_send()
1417 * Note that tp->data_segs_out overflows after 2^32 packets, in tcp_update_skb_after_send()
1420 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { in tcp_update_skb_after_send()
1421 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); in tcp_update_skb_after_send()
1422 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; in tcp_update_skb_after_send()
1425 len_ns -= min_t(u64, len_ns / 2, credit); in tcp_update_skb_after_send()
1426 tp->tcp_wstamp_ns += len_ns; in tcp_update_skb_after_send()
1429 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
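/* Illustration only, plain userspace C: how the next departure timestamp is
 * advanced under pacing as in the lines above. The 64 KB skb, 1.25 GB/s rate
 * and 20 us of accumulated credit are assumed example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 1250000000ULL;				/* pacing rate, bytes/s */
	uint64_t skb_len = 64 * 1024;
	uint64_t len_ns = skb_len * 1000000000ULL / rate;	/* ~52428 ns */
	uint64_t credit = 20000;				/* ns we are already "ahead" */
	uint64_t take = len_ns / 2 < credit ? len_ns / 2 : credit;

	len_ns -= take;		/* absorb jitter, but never more than half */
	printf("advance wstamp by %llu ns\n", (unsigned long long)len_ns);
	return 0;
}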
1464 prior_wstamp = tp->tcp_wstamp_ns; in __tcp_transmit_skb()
1465 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); in __tcp_transmit_skb()
1466 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in __tcp_transmit_skb()
1478 return -ENOBUFS; in __tcp_transmit_skb()
1479 /* retransmit skbs might have a non zero value in skb->dev in __tcp_transmit_skb()
1480 * because skb->dev is aliased with skb->rbnode.rb_left in __tcp_transmit_skb()
1482 skb->dev = NULL; in __tcp_transmit_skb()
1490 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { in __tcp_transmit_skb()
1496 * Note that we do not force the PSH flag for non GSO packets, in __tcp_transmit_skb()
1498 * and in this case it is better to delay the delivery of 1-MSS in __tcp_transmit_skb()
1503 tcb->tcp_flags |= TCPHDR_PSH; in __tcp_transmit_skb()
1507 /* We set skb->ooo_okay to one if this packet can select in __tcp_transmit_skb()
1511 * if XPS is enabled, or sk->sk_txhash otherwise. in __tcp_transmit_skb()
1520 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || in __tcp_transmit_skb()
1528 skb->pfmemalloc = 0; in __tcp_transmit_skb()
1534 skb->sk = sk; in __tcp_transmit_skb()
1535 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; in __tcp_transmit_skb()
1536 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1538 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); in __tcp_transmit_skb()
1541 th = (struct tcphdr *)skb->data; in __tcp_transmit_skb()
1542 th->source = inet->inet_sport; in __tcp_transmit_skb()
1543 th->dest = inet->inet_dport; in __tcp_transmit_skb()
1544 th->seq = htonl(tcb->seq); in __tcp_transmit_skb()
1545 th->ack_seq = htonl(rcv_nxt); in __tcp_transmit_skb()
1547 (tcb->tcp_flags & TCPHDR_FLAGS_MASK)); in __tcp_transmit_skb()
1549 th->check = 0; in __tcp_transmit_skb()
1550 th->urg_ptr = 0; in __tcp_transmit_skb()
1553 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1554 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1555 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1556 th->urg = 1; in __tcp_transmit_skb()
1557 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1558 th->urg_ptr = htons(0xFFFF); in __tcp_transmit_skb()
1559 th->urg = 1; in __tcp_transmit_skb()
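/* Simplified sketch of the urgent-pointer encoding above (illustration
 * only; it ignores the extra SND.NXT check of the second branch): the
 * header carries a 16-bit offset from this segment's sequence number to
 * SND.UP, so the real value only fits while SND.UP lies within 64 KB of
 * the segment, and saturates at 0xFFFF beyond that.
 */
#include <stdio.h>
#include <stdint.h>

uint16_t urg_ptr_field(uint32_t seq, uint32_t snd_up)
{
	uint32_t off = snd_up - seq;		/* sequence-space distance */

	return off < 0x10000 ? (uint16_t)off : 0xFFFF;
}

int main(void)
{
	printf("%u\n", (unsigned int)urg_ptr_field(1000, 1500));	/* 500 */
	printf("%u\n", (unsigned int)urg_ptr_field(1000, 71000));	/* 65535 */
	return 0;
}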
1563 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1564 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { in __tcp_transmit_skb()
1565 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1571 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1580 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1590 return -ENOMEM; in __tcp_transmit_skb()
1597 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, in __tcp_transmit_skb()
1601 if (likely(tcb->tcp_flags & TCPHDR_ACK)) in __tcp_transmit_skb()
1604 if (skb->len != tcp_header_size) { in __tcp_transmit_skb()
1606 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1607 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1610 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1614 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1616 /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */ in __tcp_transmit_skb()
1617 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in __tcp_transmit_skb()
1618 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in __tcp_transmit_skb()
1620 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ in __tcp_transmit_skb()
1623 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in __tcp_transmit_skb()
1628 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, in __tcp_transmit_skb()
1630 sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1647 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1660 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1664 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1665 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1673 if (skb->len <= mss_now) { in tcp_set_skb_tso_segs()
1675 * non-TSO case. in tcp_set_skb_tso_segs()
1677 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1681 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
1682 tso_segs = DIV_ROUND_UP(skb->len, mss_now); in tcp_set_skb_tso_segs()
1694 tp->packets_out -= decr; in tcp_adjust_pcount()
1696 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1697 tp->sacked_out -= decr; in tcp_adjust_pcount()
1698 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1699 tp->retrans_out -= decr; in tcp_adjust_pcount()
1700 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1701 tp->lost_out -= decr; in tcp_adjust_pcount()
1705 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1712 return TCP_SKB_CB(skb)->txstamp_ack || in tcp_has_tx_tstamp()
1713 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); in tcp_has_tx_tstamp()
1721 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1723 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1725 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1726 shinfo2->tx_flags |= tsflags; in tcp_fragment_tstamp()
1727 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1728 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; in tcp_fragment_tstamp()
1729 TCP_SKB_CB(skb)->txstamp_ack = 0; in tcp_fragment_tstamp()
1735 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; in tcp_skb_fragment_eor()
1736 TCP_SKB_CB(skb)->eor = 0; in tcp_skb_fragment_eor()
1746 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1748 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1767 if (WARN_ON(len > skb->len)) in tcp_fragment()
1768 return -EINVAL; in tcp_fragment()
1777 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); in tcp_fragment()
1778 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1783 return -ENOMEM; in tcp_fragment()
1787 return -ENOMEM; in tcp_fragment()
1792 return -ENOMEM; /* We'll just try again later. */ in tcp_fragment()
1796 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1797 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1798 nlen = skb->len - len; in tcp_fragment()
1799 buff->truesize += nlen; in tcp_fragment()
1800 skb->truesize -= nlen; in tcp_fragment()
1803 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1804 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1805 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1808 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1809 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1810 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1811 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1816 skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC); in tcp_fragment()
1826 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; in tcp_fragment()
1831 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1832 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1843 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); in tcp_fragment()
1860 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1861 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1865 eat -= size; in __pskb_trim_head()
1867 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1869 skb_frag_off_add(&shinfo->frags[k], eat); in __pskb_trim_head()
1870 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1876 shinfo->nr_frags = k; in __pskb_trim_head()
1878 skb->data_len -= len; in __pskb_trim_head()
1879 skb->len = skb->data_len; in __pskb_trim_head()
1889 return -ENOMEM; in tcp_trim_head()
1893 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1895 skb->truesize -= delta_truesize; in tcp_trim_head()
1896 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1900 /* Any change of skb->len requires recalculation of tso factor. */ in tcp_trim_head()
1915 It is MMS_S - sizeof(tcphdr) of rfc1122 in __tcp_mtu_to_mss()
1917 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1920 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1921 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1924 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1928 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); in __tcp_mtu_to_mss()
1936 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1937 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
1948 tp->tcp_header_len + in tcp_mss_to_mtu()
1949 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1950 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
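/* Worked example of the two conversions above (illustration only, assuming
 * IPv4 without IP options and the 12-byte aligned timestamp option counted
 * in tp->tcp_header_len):
 *   __tcp_mtu_to_mss(1500) = 1500 - 20 - 20     = 1460
 *   tcp_mtu_to_mss(1500)   = 1460 - (32 - 20)   = 1448
 *   tcp_mss_to_mtu(1448)   = 1448 + 32 + 0 + 20 = 1500
 */
#include <stdio.h>

int main(void)
{
	int mtu = 1500, net_hdr = 20, tcp_hdr = 20, opts = 12;
	int mss_no_opts = mtu - net_hdr - tcp_hdr;		/* 1460 */
	int mss_now = mss_no_opts - opts;			/* 1448 */
	int back_to_mtu = mss_now + (tcp_hdr + opts) + net_hdr;	/* 1500 */

	printf("%d %d %d\n", mss_no_opts, mss_now, back_to_mtu);
	return 0;
}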
1961 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; in tcp_mtup_init()
1962 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1963 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1964 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1965 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1966 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1967 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1972 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT count
1975 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1979 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1981 tp->mss_cache is current effective sending mss, including
1984 tp->rx_opt.mss_clamp.
1989 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1990 are READ ONLY outside this function. --ANK (980731)
1998 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1999 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
2005 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
2006 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
2007 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
2008 tp->mss_cache = mss_now; in tcp_sync_mss()
2026 mss_now = tp->mss_cache; in tcp_current_mss()
2030 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
2036 /* The mss_cache is sized based on tp->tcp_header_len, which assumes in tcp_current_mss()
2040 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
2041 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
2042 mss_now -= delta; in tcp_current_mss()
2056 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
2057 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
2060 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
2062 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
2065 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
2067 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
2072 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
2076 * is fully utilized. If cwnd-limited then remember that fact for the in tcp_cwnd_validate()
2077 * current window. If not cwnd-limited then track the maximum number of in tcp_cwnd_validate()
2078 * outstanding packets in the current window. (If cwnd-limited then we in tcp_cwnd_validate()
2079 * chose to not update tp->max_packets_out to avoid an extra else in tcp_cwnd_validate()
2082 if (!before(tp->snd_una, tp->cwnd_usage_seq) || in tcp_cwnd_validate()
2084 (!tp->is_cwnd_limited && in tcp_cwnd_validate()
2085 tp->packets_out > tp->max_packets_out)) { in tcp_cwnd_validate()
2086 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
2087 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
2088 tp->cwnd_usage_seq = tp->snd_nxt; in tcp_cwnd_validate()
2093 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
2094 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
2097 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
2098 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
2100 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && in tcp_cwnd_validate()
2101 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
2102 !ca_ops->cong_control) in tcp_cwnd_validate()
2112 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
2113 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
2114 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
2122 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
2123 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
2127 * Note that a TSO packet might end with a sub-mss segment
2129 * if ((skb->len % mss) != 0)
2130 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
2132 * skb_pcount = skb->len / mss_now
2137 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
2138 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
2153 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
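/* Plain-C restatement of the test above (illustration only, with the state
 * collapsed into booleans): a segment shorter than one MSS is held back if
 * the user asked for TCP_CORK, or if Nagle is enabled and an earlier
 * sub-MSS segment is still unacknowledged (the Minshall check on snd_sml).
 */
#include <stdbool.h>
#include <stdio.h>

struct nagle_state {
	bool partial;		/* this skb is shorter than the current MSS */
	bool cork;		/* TCP_CORK requested */
	bool nodelay;		/* TCP_NODELAY requested */
	bool small_unacked;	/* a previous sub-MSS segment is in flight */
};

bool nagle_defers_send(const struct nagle_state *s)
{
	return s->partial &&
	       (s->cork || (!s->nodelay && s->small_unacked));
}

int main(void)
{
	struct nagle_state s = { .partial = true, .small_unacked = true };

	printf("%d\n", nagle_defers_send(&s));	/* 1: wait for the ACK */
	return 0;
}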
2160 * - For close peers, we rather send bigger packets to reduce
2162 * - For long distance/rtt flows, we would like to get ACK clocking
2166 * in bigger TSO bursts. We cut the RTT-based allowance in half
2167 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
2176 bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); in tcp_tso_autosize()
2178 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); in tcp_tso_autosize()
2179 if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) in tcp_tso_autosize()
2180 bytes += sk->sk_gso_max_size >> r; in tcp_tso_autosize()
2182 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); in tcp_tso_autosize()
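/* Worked example of the autosizing above (illustration only; the pacing
 * rate, pacing shift, min RTT and 64 KB gso_max_size are assumed values):
 * roughly one pacing-shift worth of the pacing rate, plus an RTT-based
 * allowance that halves for every 2^tso_rtt_log (512) usec of min RTT.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pacing_rate = 1250000000ULL;	/* 10 Gbit/s in bytes/s */
	unsigned int pacing_shift = 10;		/* ~1 ms worth of data */
	unsigned int min_rtt_us = 3000;
	unsigned int tso_rtt_log = 9;
	uint64_t bytes = pacing_rate >> pacing_shift;	/* 1220703 */
	unsigned int r = min_rtt_us >> tso_rtt_log;	/* 5 */

	if (r < 32)
		bytes += (64 * 1024) >> r;		/* + 2048 */
	printf("autosize target: %llu bytes\n", (unsigned long long)bytes);
	return 0;
}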
2192 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
2195 min_tso = ca_ops->min_tso_segs ? in tcp_tso_segs()
2196 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
2197 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); in tcp_tso_segs()
2200 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
2213 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
2219 needed = min(skb->len, window); in tcp_mss_split_point()
2230 return needed - partial; in tcp_mss_split_point()
2251 return min(halfcwnd, cwnd - in_flight); in tcp_cwnd_test()
2284 /* Don't use the nagle rule for urgent data (or for the final FIN). */ in tcp_nagle_test()
2285 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
2288 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
2299 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
2301 if (skb->len > cur_mss) in tcp_snd_wnd_test()
2302 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
2311 * know that all the data is in scatter-gather pages, and that the
2317 int nlen = skb->len - len; in tso_fragment()
2322 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); in tso_fragment()
2326 return -ENOMEM; in tso_fragment()
2330 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
2331 sk_mem_charge(sk, buff->truesize); in tso_fragment()
2332 buff->truesize += nlen; in tso_fragment()
2333 skb->truesize -= nlen; in tso_fragment()
2336 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
2337 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
2338 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
2341 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
2342 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
2343 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
2379 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
2384 * Note that tp->tcp_wstamp_ns can be in the future if we have in tcp_tso_should_defer()
2387 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; in tcp_tso_should_defer()
2396 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
2399 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
2403 /* If a full-sized TSO skb can be sent, do it. */ in tcp_tso_should_defer()
2404 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
2408 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
2411 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2413 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); in tcp_tso_should_defer()
2427 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
2436 srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us; in tcp_tso_should_defer()
2438 expected_ack = head->tstamp + srtt_in_ns; in tcp_tso_should_defer()
2440 how_far_is_the_ack = expected_ack - tp->tcp_clock_cache; in tcp_tso_should_defer()
2447 if ((s64)(how_far_is_the_ack - threshold) > 0) in tcp_tso_should_defer()
2452 * 1) We are cwnd-limited in tcp_tso_should_defer()
2453 * 2) We are rwnd-limited in tcp_tso_should_defer()
2457 if (cong_win <= skb->len) { in tcp_tso_should_defer()
2462 if (send_win <= skb->len) { in tcp_tso_should_defer()
2469 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || in tcp_tso_should_defer()
2470 TCP_SKB_CB(skb)->eor) in tcp_tso_should_defer()
2487 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); in tcp_mtu_check_reprobe()
2488 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2493 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2494 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2496 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2497 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2500 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2510 if (len <= skb->len) in tcp_can_coalesce_send_queue_head()
2516 len -= skb->len; in tcp_can_coalesce_send_queue_head()
2525 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; in tcp_clone_payload()
2529 if (!sk_wmem_schedule(sk, to->truesize + probe_size)) in tcp_clone_payload()
2530 return -ENOMEM; in tcp_clone_payload()
2532 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_clone_payload()
2533 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; in tcp_clone_payload()
2536 return -EINVAL; in tcp_clone_payload()
2538 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { in tcp_clone_payload()
2542 probe_size - len); in tcp_clone_payload()
2552 return -E2BIG; in tcp_clone_payload()
2565 skb_shinfo(to)->nr_frags = nr_frags; in tcp_clone_payload()
2566 to->truesize += probe_size; in tcp_clone_payload()
2567 to->len += probe_size; in tcp_clone_payload()
2568 to->data_len += probe_size; in tcp_clone_payload()
2581 TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags; in tcp_eat_one_skb()
2582 TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor; in tcp_eat_one_skb()
2595 * -1 otherwise
2614 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2615 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2616 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2618 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2619 return -1; in tcp_mtu_probe()
2622 * and current mss_clamp. if (search_high - search_low) in tcp_mtu_probe()
2626 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2627 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2628 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2629 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2634 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2635 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { in tcp_mtu_probe()
2640 return -1; in tcp_mtu_probe()
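/* Sketch of the probe-size choice above (illustration only): probing
 * targets the MSS at the midpoint of the current MTU search range, and
 * stops once that range is narrower than the probe threshold. All numbers
 * below are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	int search_low = 1064, search_high = 1540;	/* current MTU bounds */
	int probe_threshold = 8;
	int interval = search_high - search_low;
	int probe_mtu = search_low + interval / 2;

	if (interval < probe_threshold)
		printf("search converged, stop probing\n");
	else
		printf("next probe at MTU %d\n", probe_mtu);	/* 1302 */
	return 0;
}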
2644 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2645 return -1; in tcp_mtu_probe()
2647 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2648 return -1; in tcp_mtu_probe()
2649 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2655 return -1; in tcp_mtu_probe()
2661 return -1; in tcp_mtu_probe()
2666 return -1; in tcp_mtu_probe()
2672 return -1; in tcp_mtu_probe()
2674 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2675 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2681 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
2682 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
2683 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
2690 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
2692 if (skb->len <= copy) { in tcp_mtu_probe()
2695 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
2699 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2707 tcp_init_tso_segs(nskb, nskb->len); in tcp_mtu_probe()
2710 * be resegmented into mss-sized pieces by tcp_write_xmit(). in tcp_mtu_probe()
2715 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); in tcp_mtu_probe()
2718 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2719 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2720 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2725 return -1; in tcp_mtu_probe()
2735 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) in tcp_pacing_check()
2738 if (!hrtimer_is_queued(&tp->pacing_timer)) { in tcp_pacing_check()
2739 hrtimer_start(&tp->pacing_timer, in tcp_pacing_check()
2740 ns_to_ktime(tp->tcp_wstamp_ns), in tcp_pacing_check()
2749 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; in tcp_rtx_queue_empty_or_single_skb()
2756 return !node->rb_left && !node->rb_right; in tcp_rtx_queue_empty_or_single_skb()
2763 * - better RTT estimation and ACK scheduling
2764 * - faster recovery
2765 * - high rates
2776 2 * skb->truesize, in tcp_small_queue_check()
2777 READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2779 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); in tcp_small_queue_check()
2783 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2784 u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * in tcp_small_queue_check()
2785 tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2788 * approximate our needs assuming an ~100% skb->truesize overhead. in tcp_small_queue_check()
2792 extra_bytes >>= (20 - 1); in tcp_small_queue_check()
2795 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2804 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2810 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
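/* Worked example of the extra TSQ allowance above (illustration only; the
 * pacing rate and tcp_tx_delay are assumed values): enough extra bytes are
 * budgeted to cover the configured delay at the pacing rate. The shift by
 * (20 - 1) divides by ~10^6 to turn microseconds into seconds while
 * doubling the result for the ~100% skb->truesize overhead noted above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pacing_rate = 1250000000ULL;	/* bytes per second */
	uint64_t tx_delay_us = 500;		/* assumed tcp_tx_delay */
	uint64_t extra = (pacing_rate * tx_delay_us) >> (20 - 1);

	/* exact value would be rate * delay * 2 / 1e6 = 1,250,000 bytes */
	printf("extra TSQ budget: ~%llu bytes\n", (unsigned long long)extra);
	return 0;
}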
2819 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2822 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2823 tp->chrono_start = now; in tcp_chrono_set()
2824 tp->chrono_type = new; in tcp_chrono_set()
2836 if (type > tp->chrono_type) in tcp_chrono_start()
2854 else if (type == tp->chrono_type) in tcp_chrono_stop()
2863 struct sk_buff *next_skb = skb->next; in tcp_grow_skb()
2872 nlen = min_t(u32, amount, next_skb->len); in tcp_grow_skb()
2876 TCP_SKB_CB(skb)->end_seq += nlen; in tcp_grow_skb()
2877 TCP_SKB_CB(next_skb)->seq += nlen; in tcp_grow_skb()
2879 if (!next_skb->len) { in tcp_grow_skb()
2881 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_grow_skb()
2892 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2934 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2936 tp->tcp_wstamp_ns = tp->tcp_clock_cache; in tcp_write_xmit()
2937 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in tcp_write_xmit()
2938 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_write_xmit()
2955 missing_bytes = cwnd_quota * mss_now - skb->len; in tcp_write_xmit()
2984 if (skb->len > limit && in tcp_write_xmit()
2993 * We do not want to send a pure-ack packet and have in tcp_write_xmit()
2996 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) in tcp_write_xmit()
3026 tp->prr_out += sent_pkts; in tcp_write_xmit()
3033 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
3046 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_schedule_loss_probe()
3049 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); in tcp_schedule_loss_probe()
3054 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
3055 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
3056 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
3063 if (tp->srtt_us) { in tcp_schedule_loss_probe()
3064 timeout_us = tp->srtt_us >> 2; in tcp_schedule_loss_probe()
3065 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
3076 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
3093 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in skb_still_in_host_queue()
3115 if (tp->tlp_high_seq) in tcp_send_loss_probe()
3118 tp->tlp_retrans = 0; in tcp_send_loss_probe()
3121 pcount = tp->packets_out; in tcp_send_loss_probe()
3123 if (tp->packets_out > pcount) in tcp_send_loss_probe()
3127 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
3129 tcp_warn_once(sk, tp->packets_out, "invalid inflight: "); in tcp_send_loss_probe()
3130 smp_store_release(&inet_csk(sk)->icsk_pending, 0); in tcp_send_loss_probe()
3141 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
3143 (pcount - 1) * mss, mss, in tcp_send_loss_probe()
3155 tp->tlp_retrans = 1; in tcp_send_loss_probe()
3159 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
3163 smp_store_release(&inet_csk(sk)->icsk_pending, 0); in tcp_send_loss_probe()
3179 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
3194 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
3196 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
3208 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
3214 * since header prediction assumes th->window stays fixed.
3216 * Strictly speaking, keeping th->window fixed violates the receiver
3260 * fluctuations. --SAW 1998/11/1 in __tcp_select_window()
3262 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
3270 full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
3279 * a non-zero scaling factor in effect. in __tcp_select_window()
3281 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) in __tcp_select_window()
3287 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3295 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3298 * of the maximum allowed, try to move to zero-window, else in __tcp_select_window()
3308 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
3309 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3314 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
3321 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3323 window = tp->rcv_wnd; in __tcp_select_window()
3332 if (window <= free_space - mss || window > free_space) in __tcp_select_window()
3343 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3346 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3353 free_space < (1 << tp->rx_opt.rcv_wscale)) in __tcp_select_window()
3357 if (free_space > tp->rcv_ssthresh) { in __tcp_select_window()
3358 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3363 * the memory-based limit, and rcv_ssthresh is not a hard limit in __tcp_select_window()
3366 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3380 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_skb_collapse_tstamp()
3381 shinfo->tskey = next_shinfo->tskey; in tcp_skb_collapse_tstamp()
3382 TCP_SKB_CB(skb)->txstamp_ack |= in tcp_skb_collapse_tstamp()
3383 TCP_SKB_CB(next_skb)->txstamp_ack; in tcp_skb_collapse_tstamp()
3394 next_skb_size = next_skb->len; in tcp_collapse_retrans()
3404 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
3407 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
3412 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
3413 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; in tcp_collapse_retrans()
3416 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
3417 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
3437 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
3453 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) in tcp_retrans_try_collapse()
3455 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
3465 space -= skb->len; in tcp_retrans_try_collapse()
3475 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
3484 * state updates are done by the caller. Returns non-zero if an
3496 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
3497 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
3500 err = -EBUSY; in __tcp_retransmit_skb()
3505 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
3506 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in __tcp_retransmit_skb()
3507 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; in __tcp_retransmit_skb()
3508 TCP_SKB_CB(skb)->seq++; in __tcp_retransmit_skb()
3511 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
3513 err = -EINVAL; in __tcp_retransmit_skb()
3516 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) { in __tcp_retransmit_skb()
3517 err = -ENOMEM; in __tcp_retransmit_skb()
3522 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { in __tcp_retransmit_skb()
3523 err = -EHOSTUNREACH; /* Routing failure or similar. */ in __tcp_retransmit_skb()
3528 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in __tcp_retransmit_skb()
3536 if (TCP_SKB_CB(skb)->seq != tp->snd_una) { in __tcp_retransmit_skb()
3537 err = -EAGAIN; in __tcp_retransmit_skb()
3549 if (skb->len > len) { in __tcp_retransmit_skb()
3552 err = -ENOMEM; /* We'll try again later. */ in __tcp_retransmit_skb()
3557 err = -ENOMEM; in __tcp_retransmit_skb()
3563 diff -= tcp_skb_pcount(skb); in __tcp_retransmit_skb()
3567 if (skb->len < avail_wnd) in __tcp_retransmit_skb()
3575 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
3581 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
3583 tp->total_retrans += segs; in __tcp_retransmit_skb()
3584 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
3586 /* make sure skb->data is aligned on arches that require it in __tcp_retransmit_skb()
3587 * and check if ack-trimming & collapsing extended the headroom in __tcp_retransmit_skb()
3590 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
3597 nskb->dev = NULL; in __tcp_retransmit_skb()
3600 err = -ENOBUFS; in __tcp_retransmit_skb()
3605 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3614 TCP_SKB_CB(skb)->seq, segs, err); in __tcp_retransmit_skb()
3616 if (unlikely(err) && err != -EBUSY) in __tcp_retransmit_skb()
3622 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
3636 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
3640 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
3641 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3645 if (!tp->retrans_stamp) in tcp_retransmit_skb()
3646 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); in tcp_retransmit_skb()
3648 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
3649 tp->undo_retrans = 0; in tcp_retransmit_skb()
3650 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3668 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
3672 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
3683 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3685 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3688 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
3694 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3702 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3720 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3723 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3729 inet_csk(sk)->icsk_rto, true); in tcp_xmit_retransmit_queue()
3743 delta = size - sk->sk_forward_alloc; in sk_forced_mem_schedule()
3769 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3772 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
3773 TCP_SKB_CB(tskb)->end_seq++; in tcp_send_fin()
3774 tp->write_seq++; in tcp_send_fin()
3778 * We need to set tp->snd_nxt to the value it would have in tcp_send_fin()
3780 * does not change tp->snd_nxt. in tcp_send_fin()
3782 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); in tcp_send_fin()
3792 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_send_fin()
3794 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3796 tcp_init_nondata_skb(skb, sk, tp->write_seq, in tcp_send_fin()
3806 * by RFC 2525, section 2.17. -DaveM
3837 /* Send a crossed SYN-ACK during socket establishment.
3848 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
3850 return -EFAULT; in tcp_send_synack()
3852 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
3860 return -ENOMEM; in tcp_send_synack()
3861 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); in tcp_send_synack()
3865 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3866 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3867 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3871 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
3878 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3923 * sk->sk_wmem_alloc in an atomic, we can promote to rw. in tcp_make_synack()
3935 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) in tcp_make_synack()
3942 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ in tcp_make_synack()
3943 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); in tcp_make_synack()
3952 u8 keyid = tcp_rsk(req)->ao_keyid; in tcp_make_synack()
3953 u8 rnext = tcp_rsk(req)->ao_rcv_next; in tcp_make_synack()
3955 ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), in tcp_make_synack()
3956 keyid, -1); in tcp_make_synack()
3957 /* If there is no matching key - avoid sending anything, in tcp_make_synack()
3959 * for another peer-matching key, but the peer has requested in tcp_make_synack()
3966 … net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", in tcp_make_synack()
3975 key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk, in tcp_make_synack()
3981 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); in tcp_make_synack()
3983 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; in tcp_make_synack()
3991 th = (struct tcphdr *)skb->data; in tcp_make_synack()
3993 th->syn = 1; in tcp_make_synack()
3994 th->ack = 1; in tcp_make_synack()
3996 th->source = htons(ireq->ir_num); in tcp_make_synack()
3997 th->dest = ireq->ir_rmt_port; in tcp_make_synack()
3998 skb->mark = ireq->ir_mark; in tcp_make_synack()
3999 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_make_synack()
4000 th->seq = htonl(tcp_rsk(req)->snt_isn); in tcp_make_synack()
4002 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); in tcp_make_synack()
4005 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); in tcp_make_synack()
4007 th->doff = (tcp_header_size >> 2); in tcp_make_synack()
4010 /* Okay, we have all we need - do the md5 hash if needed */ in tcp_make_synack()
4013 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, in tcp_make_synack()
4018 tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location, in tcp_make_synack()
4020 opts.hash_location - (u8 *)th, 0); in tcp_make_synack()
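/* Editor's note: a compact, self-contained restatement of the header
 * fields tcp_make_synack() fills in above, using the userspace
 * struct tcphdr from <linux/tcp.h>.  The ports, ISN, rcv_nxt and window
 * are hypothetical; the real code takes them from the request sock.
 */
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>

static void fill_synack(struct tcphdr *th)
{
        memset(th, 0, sizeof(*th));
        th->syn = 1;
        th->ack = 1;
        th->source = htons(443);                /* our listening port (hypothetical) */
        th->dest = htons(40000);                /* client's port (hypothetical) */
        th->seq = htonl(0x11111111);            /* our initial sequence number */
        th->ack_seq = htonl(0x22222222 + 1);    /* client ISN + 1: the SYN consumed one */
        th->window = htons(65535);              /* never scaled in a SYN-ACK itself */
        th->doff = sizeof(*th) >> 2;            /* header length in 32-bit words, no options */
}

int main(void)
{
        struct tcphdr th;

        fill_synack(&th);
        return th.syn && th.ack ? 0 : 1;
}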
4048 if (likely(ca && bpf_try_module_get(ca, ca->owner))) { in tcp_ca_dst_init()
4049 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
4050 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
4051 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
4068 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
4069 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) in tcp_connect_init()
4070 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
4075 user_mss = READ_ONCE(tp->rx_opt.user_mss); in tcp_connect_init()
4077 tp->rx_opt.mss_clamp = user_mss; in tcp_connect_init()
4078 tp->max_window = 0; in tcp_connect_init()
4084 if (!tp->window_clamp) in tcp_connect_init()
4085 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); in tcp_connect_init()
4086 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
4091 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
4092 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
4093 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); in tcp_connect_init()
4100 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
4101 &tp->rcv_wnd, in tcp_connect_init()
4102 &tp->window_clamp, in tcp_connect_init()
4103 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), in tcp_connect_init()
4107 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
4108 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
4110 WRITE_ONCE(sk->sk_err, 0); in tcp_connect_init()
4112 tp->snd_wnd = 0; in tcp_connect_init()
4115 tp->snd_una = tp->write_seq; in tcp_connect_init()
4116 tp->snd_sml = tp->write_seq; in tcp_connect_init()
4117 tp->snd_up = tp->write_seq; in tcp_connect_init()
4118 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect_init()
4120 if (likely(!tp->repair)) in tcp_connect_init()
4121 tp->rcv_nxt = 0; in tcp_connect_init()
4123 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
4124 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
4125 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_connect_init()
4127 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
4128 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0); in tcp_connect_init()
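/* Editor's note: the SOCK_RCVBUF_LOCK branch in tcp_connect_init() above
 * is what an application triggers by setting SO_RCVBUF before connect():
 * the window clamp is then derived from the locked buffer instead of
 * being grown by receive-buffer autotuning.  A minimal sketch with a
 * hypothetical buffer size and peer address.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int rcvbuf = 128 * 1024;        /* hypothetical; the kernel doubles it internally */
        struct sockaddr_in peer = {
                .sin_family = AF_INET,
                .sin_port = htons(8080),                        /* hypothetical */
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),      /* hypothetical */
        };

        /* SO_RCVBUF sets SOCK_RCVBUF_LOCK, so the clamp above is taken
         * from this buffer and autotuning will not enlarge it later.
         */
        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));
        connect(fd, (struct sockaddr *)&peer, sizeof(peer));
        close(fd);
        return 0;
}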
4137 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
4139 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
4140 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
4141 WRITE_ONCE(tp->write_seq, tcb->end_seq); in tcp_connect_queue_skb()
4142 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
4146 * queue a data-only packet after the regular SYN, such that regular SYNs
4147 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
4156 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
4161 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
4162 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
4165 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and in tcp_send_syn_data()
4166 * user-MSS. Reserve maximum option space for middleboxes that add in tcp_send_syn_data()
4169 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
4171 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
4173 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
4176 space = min_t(size_t, space, fo->size); in tcp_send_syn_data()
4180 pfrag, sk->sk_allocation)) in tcp_send_syn_data()
4182 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); in tcp_send_syn_data()
4185 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); in tcp_send_syn_data()
4187 space = min_t(size_t, space, pfrag->size - pfrag->offset); in tcp_send_syn_data()
4191 space = copy_page_from_iter(pfrag->page, pfrag->offset, in tcp_send_syn_data()
4192 space, &fo->data->msg_iter); in tcp_send_syn_data()
4198 skb_fill_page_desc(syn_data, 0, pfrag->page, in tcp_send_syn_data()
4199 pfrag->offset, space); in tcp_send_syn_data()
4200 page_ref_inc(pfrag->page); in tcp_send_syn_data()
4201 pfrag->offset += space; in tcp_send_syn_data()
4203 skb_zcopy_set(syn_data, fo->uarg, NULL); in tcp_send_syn_data()
4206 if (space == fo->size) in tcp_send_syn_data()
4207 fo->data = NULL; in tcp_send_syn_data()
4208 fo->copied = space; in tcp_send_syn_data()
4211 if (syn_data->len) in tcp_send_syn_data()
4214 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
4216 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC); in tcp_send_syn_data()
4223 TCP_SKB_CB(syn_data)->seq++; in tcp_send_syn_data()
4224 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; in tcp_send_syn_data()
4226 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
4227 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
4233 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
4234 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
4238 if (fo->cookie.len > 0) in tcp_send_syn_data()
4239 fo->cookie.len = 0; in tcp_send_syn_data()
4240 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
4242 tp->syn_fastopen = 0; in tcp_send_syn_data()
4244 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ in tcp_send_syn_data()
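/* Editor's note: tcp_send_syn_data() above runs when the application has
 * requested Fast Open.  A hedged sketch of the two common client-side
 * ways to request it; the address and payload are hypothetical, and on a
 * first contact (no cached cookie) the kernel falls back to a plain SYN
 * just like the fallback path above.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>        /* TCP_FASTOPEN_CONNECT (Linux 4.11+) */
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        const char req[] = "GET / HTTP/1.0\r\n\r\n";            /* hypothetical payload */
        struct sockaddr_in peer = {
                .sin_family = AF_INET,
                .sin_port = htons(80),                          /* hypothetical */
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),      /* hypothetical */
        };
        int one = 1;
        int fd;

        /* Variant 1: hand the payload to the SYN directly. */
        fd = socket(AF_INET, SOCK_STREAM, 0);
        sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
               (struct sockaddr *)&peer, sizeof(peer));
        close(fd);

        /* Variant 2: defer the SYN until the first write. */
        fd = socket(AF_INET, SOCK_STREAM, 0);
        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
        connect(fd, (struct sockaddr *)&peer, sizeof(peer));    /* no SYN sent yet */
        send(fd, req, sizeof(req) - 1, 0);                      /* this write sends SYN (+data) */
        close(fd);
        return 0;
}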
4259 * Return error if the peer has both an md5 and a tcp-ao key in tcp_connect()
4262 if (unlikely(rcu_dereference_protected(tp->md5sig_info, in tcp_connect()
4264 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); in tcp_connect()
4265 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); in tcp_connect()
4268 ao_info = rcu_dereference_check(tp->ao_info, in tcp_connect()
4275 needs_ao |= ao_info->ao_required; in tcp_connect()
4276 WARN_ON_ONCE(ao_info->ao_required && needs_md5); in tcp_connect()
4279 return -EKEYREJECTED; in tcp_connect()
4281 /* If we have a matching md5 key and no matching tcp-ao key in tcp_connect()
4288 kfree(rcu_replace_pointer(tp->md5sig_info, NULL, in tcp_connect()
4294 if (unlikely(rcu_dereference_protected(tp->ao_info, in tcp_connect()
4299 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) in tcp_connect()
4300 return -EKEYREJECTED; in tcp_connect()
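/* Editor's note: the keys examined by the checks above are installed from
 * userspace with the TCP_MD5SIG (and TCP-AO) socket options; connect() is
 * rejected when both kinds of key match the same peer.  A hedged sketch of
 * installing an MD5 key before connecting; the peer address and secret are
 * hypothetical and error handling is omitted.
 */
#include <arpa/inet.h>
#include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in peer = {
                .sin_family = AF_INET,
                .sin_port = htons(179),                         /* hypothetical (BGP) */
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),      /* hypothetical */
        };
        struct tcp_md5sig md5;
        const char secret[] = "hypothetical-secret";

        /* The key is bound to the peer address and must be installed
         * before connect() so the SYN already carries the MD5 option.
         */
        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
        md5.tcpm_keylen = sizeof(secret) - 1;
        memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
        setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));

        connect(fd, (struct sockaddr *)&peer, sizeof(peer));
        close(fd);
        return 0;
}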
4304 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
4305 return -EHOSTUNREACH; /* Routing failure or similar. */ in tcp_connect()
4309 if (unlikely(tp->repair)) { in tcp_connect()
4314 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); in tcp_connect()
4316 return -ENOBUFS; in tcp_connect()
4321 tcp_init_nondata_skb(buff, sk, tp->write_seq, TCPHDR_SYN); in tcp_connect()
4323 tp->retrans_stamp = tcp_time_stamp_ts(tp); in tcp_connect()
4326 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
4329 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
4330 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
4331 if (err == -ECONNREFUSED) in tcp_connect()
4334 /* We change tp->snd_nxt after the tcp_transmit_skb() call in tcp_connect()
4337 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect()
4338 tp->pushed_seq = tp->write_seq; in tcp_connect()
4341 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); in tcp_connect()
4342 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
4348 inet_csk(sk)->icsk_rto, false); in tcp_connect()
4355 u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; in tcp_delack_max()
4357 return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min); in tcp_delack_max()
4367 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
4375 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
4381 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements in tcp_send_delayed_ack()
4384 if (tp->srtt_us) { in tcp_send_delayed_ack()
4385 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
4401 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
4411 smp_store_release(&icsk->icsk_ack.pending, in tcp_send_delayed_ack()
4412 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); in tcp_send_delayed_ack()
4413 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
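/* Editor's note: the delayed-ACK timeout computed above can be sidestepped
 * from userspace with TCP_QUICKACK, which makes the next ACKs immediate.
 * A minimal hedged sketch; the flag is not permanent, so request/response
 * loops typically re-arm it after every read.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>        /* TCP_QUICKACK */
#include <sys/socket.h>

/* Ask the stack to ACK at once instead of waiting out the delayed-ACK
 * timer; the kernel may revert to delaying again on its own.
 */
static void quickack(int fd)
{
        int one = 1;

        setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
}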
4422 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
4435 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; in __tcp_send_ack()
4437 icsk->icsk_ack.retry++; in __tcp_send_ack()
4439 icsk->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
4462 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt, 0); in tcp_send_ack()
4468 * Question: what should we do while in urgent mode?
4472 * Current solution: send TWO zero-length segments in urgent mode:
4473 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
4474 * out-of-date, with SND.UNA-1 to probe the window.
4476 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) in tcp_xmit_probe_skb() argument
4485 return -1; in tcp_xmit_probe_skb()
4493 tcp_init_nondata_skb(skb, sk, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
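/* Editor's note: a small standalone restatement of the sequence-number
 * trick on the line above.  Outside urgent mode the probe deliberately
 * uses the already-acknowledged SND.UNA - 1, so the receiver drops the
 * segment but still answers with its current window; in urgent mode
 * SND.UNA itself is used so the urgent pointer gets through.  The value
 * below is hypothetical.
 */
#include <stdio.h>

static unsigned int probe_seq(unsigned int snd_una, int urgent)
{
        return snd_una - !urgent;       /* SND.UNA, or SND.UNA - 1 when not urgent */
}

int main(void)
{
        unsigned int snd_una = 5000;    /* hypothetical */

        /* Prints "urgent: 5000, normal: 4999". */
        printf("urgent: %u, normal: %u\n",
               probe_seq(snd_una, 1), probe_seq(snd_una, 0));
        return 0;
}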
4501 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
4502 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
4514 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
4515 return -1; in tcp_write_wakeup()
4518 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
4521 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
4523 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
4524 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
4530 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
4531 skb->len > mss) { in tcp_write_wakeup()
4533 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4536 return -1; in tcp_write_wakeup()
4540 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4546 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
4565 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
4567 WRITE_ONCE(icsk->icsk_probes_out, 0); in tcp_send_probe0()
4568 icsk->icsk_backoff = 0; in tcp_send_probe0()
4569 icsk->icsk_probes_tstamp = 0; in tcp_send_probe0()
4573 WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1); in tcp_send_probe0()
4575 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) in tcp_send_probe0()
4576 icsk->icsk_backoff++; in tcp_send_probe0()
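/* Editor's note: a hedged sketch of the exponential backoff the probe
 * timer applies using the counter incremented above: each unanswered
 * probe roughly doubles the wait, clamped at a maximum.  The constants
 * below are illustrative stand-ins, not the kernel's RTO-derived values.
 */
#include <stdio.h>

static unsigned int probe_when(unsigned int base_ms, unsigned int backoff,
                               unsigned int max_ms)
{
        unsigned long long when = (unsigned long long)base_ms << backoff;

        return when > max_ms ? max_ms : (unsigned int)when;
}

int main(void)
{
        unsigned int backoff;

        /* Doubles 200 ms per unanswered probe, capped at two minutes. */
        for (backoff = 0; backoff < 10; backoff++)
                printf("probe %u after %u ms\n", backoff,
                       probe_when(200, backoff, 120000));
        return 0;
}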
4591 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; in tcp_rtx_synack()
4596 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) in tcp_rtx_synack()
4597 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); in tcp_rtx_synack()
4598 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, in tcp_rtx_synack()
4608 tcp_sk_rw(sk)->total_retrans++; in tcp_rtx_synack()
4611 WRITE_ONCE(req->num_retrans, req->num_retrans + 1); in tcp_rtx_synack()