Lines matching references to tp (struct tcp_sock *) in net/ipv4/tcp_input.c
124 void clean_acked_data_enable(struct tcp_sock *tp, in clean_acked_data_enable() argument
127 tp->tcp_clean_acked = cad; in clean_acked_data_enable()
132 void clean_acked_data_disable(struct tcp_sock *tp) in clean_acked_data_disable() argument
135 tp->tcp_clean_acked = NULL; in clean_acked_data_disable()
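The two helpers above install and clear the per-socket hook that tcp_ack() invokes for every cumulative ACK (see the tp->tcp_clean_acked(sk, ack) lines further down in this listing); TLS device offload uses it to free state for acknowledged records. A minimal usage sketch, assuming the callback type void (*cad)(struct sock *sk, u32 acked_seq) implied by the truncated signature above:

    static void my_clean_acked(struct sock *sk, u32 acked_seq)
    {
            /* Release bookkeeping for data entirely below acked_seq,
             * e.g. offloaded TLS records that are now acknowledged. */
    }

    clean_acked_data_enable(tcp_sk(sk), my_clean_acked);  /* hook in  */
    /* ... connection runs; tcp_ack() calls the hook per ACK ...      */
    clean_acked_data_disable(tcp_sk(sk));                 /* hook out */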
252 struct tcp_sock *tp = tcp_sk(sk); in tcp_measure_rcv_mss() local
257 if (tp->window_clamp < tp->rcvq_space.space) in tcp_measure_rcv_mss()
258 tp->rcvq_space.space = tp->window_clamp; in tcp_measure_rcv_mss()
346 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_ecn_check() local
348 if (tcp_ecn_disabled(tp)) in tcp_data_ecn_check()
357 if (tp->ecn_flags & TCP_ECN_SEEN) in tcp_data_ecn_check()
364 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR) && in tcp_data_ecn_check()
365 tcp_ecn_mode_rfc3168(tp)) { in tcp_data_ecn_check()
368 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in tcp_data_ecn_check()
374 if (!tcp_ecn_mode_rfc3168(tp)) in tcp_data_ecn_check()
376 tp->ecn_flags |= TCP_ECN_SEEN; in tcp_data_ecn_check()
381 if (!tcp_ecn_mode_rfc3168(tp)) in tcp_data_ecn_check()
383 tp->ecn_flags |= TCP_ECN_SEEN; in tcp_data_ecn_check()
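The flag juggling above is receive-side ECN per RFC 3168: an ECT codepoint merely records that the peer is sending ECN-capable packets, while CE additionally latches TCP_ECN_DEMAND_CWR so outgoing ACKs carry ECE until the sender answers with CWR. A condensed sketch of the codepoint handling in RFC 3168 mode, assuming the standard INET_ECN_* values:

    switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
    case INET_ECN_CE:                             /* congestion experienced */
            tp->ecn_flags |= TCP_ECN_DEMAND_CWR;  /* echo ECE until CWR     */
            fallthrough;
    case INET_ECN_ECT_0:
    case INET_ECN_ECT_1:
            tp->ecn_flags |= TCP_ECN_SEEN;        /* ECN in active use      */
            break;
    default:                                      /* Not-ECT: nothing to do */
            break;
    }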
389 static bool tcp_accecn_process_option(struct tcp_sock *tp, in tcp_accecn_process_option() argument
393 u8 estimate_ecnfield = tp->est_ecnfield; in tcp_accecn_process_option()
401 if (tcp_accecn_opt_fail_recv(tp)) in tcp_accecn_process_option()
404 if (!(flag & FLAG_SLOWPATH) || !tp->rx_opt.accecn) { in tcp_accecn_process_option()
405 if (!tp->saw_accecn_opt) { in tcp_accecn_process_option()
409 if (tp->bytes_sent >= (1 << 23) - 1) { in tcp_accecn_process_option()
412 tcp_accecn_saw_opt_fail_recv(tp, saw_opt); in tcp_accecn_process_option()
420 tp->delivered_ecn_bytes[ecnfield] += delivered_bytes; in tcp_accecn_process_option()
426 ptr = skb_transport_header(skb) + tp->rx_opt.accecn; in tcp_accecn_process_option()
433 if (tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) { in tcp_accecn_process_option()
434 tp->saw_accecn_opt = tcp_accecn_option_init(skb, in tcp_accecn_process_option()
435 tp->rx_opt.accecn); in tcp_accecn_process_option()
436 if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN) in tcp_accecn_process_option()
437 tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV); in tcp_accecn_process_option()
452 cnt = &tp->delivered_ecn_bytes[ecnfield - 1]; in tcp_accecn_process_option()
460 tp->est_ecnfield = ecnfield; in tcp_accecn_process_option()
472 tp->est_ecnfield = 0; in tcp_accecn_process_option()
477 static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) in tcp_count_delivered_ce() argument
479 tp->delivered_ce += ecn_count; in tcp_count_delivered_ce()
483 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, in tcp_count_delivered() argument
486 tp->delivered += delivered; in tcp_count_delivered()
487 if (tcp_ecn_mode_rfc3168(tp) && ece_ack) in tcp_count_delivered()
488 tcp_count_delivered_ce(tp, delivered); in tcp_count_delivered()
498 struct tcp_sock *tp = tcp_sk(sk); in __tcp_accecn_process() local
507 opt_deltas_valid = tcp_accecn_process_option(tp, skb, in __tcp_accecn_process()
520 if (tp->received_ce_pending >= TCP_ACCECN_ACE_MAX_DELTA) in __tcp_accecn_process()
524 delta = (corrected_ace - tp->delivered_ce) & TCP_ACCECN_CEP_ACE_MASK; in __tcp_accecn_process()
532 d_ceb = tp->delivered_ecn_bytes[INET_ECN_CE - 1] - old_ceb; in __tcp_accecn_process()
537 (tcp_is_sack(tp) || in __tcp_accecn_process()
553 if (d_ceb > delta * tp->mss_cache) in __tcp_accecn_process()
556 safe_delta * tp->mss_cache >> TCP_ACCECN_SAFETY_SHIFT) in __tcp_accecn_process()
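Context for the arithmetic above: in AccECN the ACE field (the AE/CWR/ECE bits) is a 3-bit running count of CE-marked packets, so the receiver of an ACK can only recover the increment modulo 8; the masked subtraction does exactly that, and the later d_ceb/mss_cache checks cross-validate the increment against the CE byte counter from the AccECN option. A toy example of the wrap-around recovery, assuming TCP_ACCECN_CEP_ACE_MASK is the 3-bit mask 7:

    u32 ace = 5;                            /* ACE field from this ACK    */
    u32 delivered_ce = 34;                  /* local counter: 34 mod 8 == 2 */
    u32 delta = (ace - delivered_ce) & 7;   /* (5 - 2) mod 8 == 3         */
    /* Correct as long as fewer than 8 CE marks arrive between two ACKs;
     * the safety logic above guards the ambiguous cases. */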
567 struct tcp_sock *tp = tcp_sk(sk); in tcp_accecn_process() local
573 tcp_count_delivered_ce(tp, delta); in tcp_accecn_process()
576 if (tp->pred_flags) in tcp_accecn_process()
577 tcp_fast_path_on(tp); in tcp_accecn_process()
589 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
597 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
604 nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp)); in tcp_sndbuf_expand()
605 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
648 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
653 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
685 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
688 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; in tcp_grow_window()
702 incr = 2 * tp->advmss; in tcp_grow_window()
708 tp->rcv_ssthresh += min(room, incr); in tcp_grow_window()
725 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
731 tcp_mstamp_refresh(tp); in tcp_init_buffer_space()
732 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_init_buffer_space()
733 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
737 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
738 WRITE_ONCE(tp->window_clamp, maxwin); in tcp_init_buffer_space()
740 if (tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
741 WRITE_ONCE(tp->window_clamp, in tcp_init_buffer_space()
743 4 * tp->advmss)); in tcp_init_buffer_space()
748 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
749 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
750 WRITE_ONCE(tp->window_clamp, in tcp_init_buffer_space()
751 max(2 * tp->advmss, maxwin - tp->advmss)); in tcp_init_buffer_space()
753 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
754 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_buffer_space()
755 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, in tcp_init_buffer_space()
756 (u32)TCP_INIT_CWND * tp->advmss); in tcp_init_buffer_space()
762 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
778 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
790 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
791 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
793 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
812 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
814 u32 new_sample, old_sample = tp->rcv_rtt_est.rtt_us; in tcp_rcv_rtt_update()
833 if (tp->rcv_nxt != tp->copied_seq) in tcp_rcv_rtt_update()
838 tp->rcv_rtt_est.rtt_us = new_sample; in tcp_rcv_rtt_update()
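The receiver-side RTT estimate updated here is stored left-shifted by 3 (units of 1/8 us); note the `>> 3` wherever rcv_rtt_est.rtt_us is consumed later in this listing. That makes the gain-1/8 EWMA divisionless. A minimal sketch of the non-window-dependent update:

    /* est holds 8 * rtt_us; sample is a fresh measurement in us. */
    static u32 rcv_rtt_ewma(u32 est, u32 sample)
    {
            long m = sample;

            if (!est)
                    return sample << 3;   /* first sample seeds the filter */
            m -= (est >> 3);              /* sample minus current estimate */
            return est + m;               /* new = 7/8 old + 1/8 sample    */
    }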
841 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
845 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
847 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
849 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); in tcp_rcv_rtt_measure()
852 tcp_rcv_rtt_update(tp, delta_us, 1); in tcp_rcv_rtt_measure()
855 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
856 tp->rcv_rtt_est.time = tp->tcp_mstamp; in tcp_rcv_rtt_measure()
859 static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp, u32 min_delta) in tcp_rtt_tsopt_us() argument
863 delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr; in tcp_rtt_tsopt_us()
864 if (tp->tcp_usec_ts) in tcp_rtt_tsopt_us()
879 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
881 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) in tcp_rcv_rtt_measure_ts()
883 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
887 s32 delta = tcp_rtt_tsopt_us(tp, 0); in tcp_rcv_rtt_measure_ts()
890 tcp_rcv_rtt_update(tp, delta, 0); in tcp_rcv_rtt_measure_ts()
897 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcvbuf_grow() local
902 oldval = tp->rcvq_space.space; in tcp_rcvbuf_grow()
903 tp->rcvq_space.space = newval; in tcp_rcvbuf_grow()
912 rtt_us = tp->rcv_rtt_est.rtt_us >> 3; in tcp_rcvbuf_grow()
926 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_rcvbuf_grow()
927 rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt; in tcp_rcvbuf_grow()
935 WRITE_ONCE(tp->window_clamp, in tcp_rcvbuf_grow()
945 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
950 if (unlikely(!tp->rcv_rtt_est.rtt_us)) in tcp_rcv_space_adjust()
957 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); in tcp_rcv_space_adjust()
958 if (time < (tp->rcv_rtt_est.rtt_us >> 3)) in tcp_rcv_space_adjust()
962 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
964 inq = tp->rcv_nxt - tp->copied_seq; in tcp_rcv_space_adjust()
966 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
974 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
975 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_rcv_space_adjust()
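tcp_rcv_space_adjust() is the receive-buffer autotuning step (DRS, "dynamic right-sizing"): at most once per measured receiver RTT it checks how many bytes the application actually copied out and, when that exceeds the previous per-RTT estimate, feeds the larger value into rcvq_space.space via the buffer-growth path shown above. A schematic of the sampling loop, with the clamping and sysctl checks of the real code omitted:

    time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
    if (time < (tp->rcv_rtt_est.rtt_us >> 3))
            return;                                 /* < one RTT elapsed  */

    copied = tp->copied_seq - tp->rcvq_space.seq;   /* bytes to userspace */
    if (copied > tp->rcvq_space.space)
            tcp_rcvbuf_grow(sk, copied);            /* assumed entry into
                                                     * the growth helper  */
    tp->rcvq_space.seq  = tp->copied_seq;           /* restart sampling   */
    tp->rcvq_space.time = tp->tcp_mstamp;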
1000 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
1008 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
1055 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
1057 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
1080 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
1092 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
1094 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
1095 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
1096 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
1097 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
1098 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
1100 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
1101 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
1102 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
1103 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
1104 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
1111 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
1112 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
1113 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
1114 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
1118 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
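The shifts above are Van Jacobson's estimator in the fixed-point layout RFC 6298 assumes: srtt_us stores 8*SRTT and mdev/rttvar store 4*RTTVAR, turning the 1/8 and 1/4 gains into shifts (plus the mdev_max/rtt_seq machinery, which damps rttvar decay to once per flight). Stripped of the fixed point, the per-sample update is:

    /* RFC 6298, for each RTT measurement m (first sample seeds both): */
    rttvar = 0.75 * rttvar + 0.25 * fabs(srtt - m);  /* uses the old srtt    */
    srtt   = 0.875 * srtt + 0.125 * m;
    rto    = srtt + fmax(G, 4.0 * rttvar);           /* G: clock granularity */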
1123 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
1127 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); in tcp_update_pacing_rate()
1137 if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2) in tcp_update_pacing_rate()
1142 rate *= max(tcp_snd_cwnd(tp), tp->packets_out); in tcp_update_pacing_rate()
1144 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
1145 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
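tcp_update_pacing_rate() folds its fixed-point factors together: srtt_us is 8*SRTT, so the `<< 3` cancels that, and the `/ 100` turns the gain sysctls into percentages (defaults: 200 while cwnd is below ssthresh/2, i.e. slow start, and 120 afterwards). Unfolded, the rate it sets is:

    /* bytes/sec; gain_pct is tcp_pacing_ss_ratio or tcp_pacing_ca_ratio */
    pacing_rate = (gain_pct / 100.0) * mss_cache
                  * max(cwnd, packets_out)
                  / (srtt_us / 1e6);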
1160 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
1171 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
1185 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
1191 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
1215 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, in tcp_dsack_seen() argument
1225 if (seq_len > tp->max_window) in tcp_dsack_seen()
1227 if (seq_len > tp->mss_cache) in tcp_dsack_seen()
1228 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); in tcp_dsack_seen()
1229 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) in tcp_dsack_seen()
1232 tp->dsack_dups += dup_segs; in tcp_dsack_seen()
1234 if (tp->dsack_dups > tp->total_retrans) in tcp_dsack_seen()
1237 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
1244 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP)) in tcp_dsack_seen()
1245 tp->rack.dsack_seen = 1; in tcp_dsack_seen()
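tcp_dsack_seen() converts a DSACK range into an estimated count of duplicate segments: ranges wider than max_window are rejected as bogus, a range matching tlp_high_seq counts as a single TLP probe, and anything longer than one MSS is divided up. For example, with mss_cache = 1448:

    u32 seq_len  = end_seq - start_seq;                  /* say 4000 bytes */
    u32 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); /* -> 3 segments  */
    tp->dsack_dups += dup_segs;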
1261 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
1262 const u32 mss = tp->mss_cache; in tcp_check_sack_reordering()
1265 fack = tcp_highest_sack_seq(tp); in tcp_check_sack_reordering()
1270 if ((metric > tp->reordering * mss) && mss) { in tcp_check_sack_reordering()
1273 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_check_sack_reordering()
1274 tp->reordering, in tcp_check_sack_reordering()
1276 tp->sacked_out, in tcp_check_sack_reordering()
1277 tp->undo_marker ? tp->undo_retrans : 0); in tcp_check_sack_reordering()
1279 tp->reordering = min_t(u32, (metric + mss - 1) / mss, in tcp_check_sack_reordering()
1284 tp->reord_seen++; in tcp_check_sack_reordering()
1294 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
1296 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || in tcp_verify_retransmit_hint()
1297 (tp->retransmit_skb_hint && in tcp_verify_retransmit_hint()
1299 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) in tcp_verify_retransmit_hint()
1300 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
1306 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_notify_skb_loss_event() argument
1308 tp->lost += tcp_skb_pcount(skb); in tcp_notify_skb_loss_event()
1314 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_skb_lost() local
1319 tcp_verify_retransmit_hint(tp, skb); in tcp_mark_skb_lost()
1324 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1327 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1330 tp->lost_out += tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1332 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1429 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
1433 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1437 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1443 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1446 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1450 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1453 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1457 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1463 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
1470 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1488 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); in tcp_check_dsack()
1497 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1499 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1500 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs); in tcp_check_dsack()
1568 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1572 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1573 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1574 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); in tcp_sacktag_one()
1581 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1585 tcp_rack_advance(tp, sacked, end_seq, xmit_time); in tcp_sacktag_one()
1594 tp->lost_out -= pcount; in tcp_sacktag_one()
1595 tp->retrans_out -= pcount; in tcp_sacktag_one()
1603 tcp_highest_sack_seq(tp)) && in tcp_sacktag_one()
1607 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1616 tp->lost_out -= pcount; in tcp_sacktag_one()
1622 tp->sacked_out += pcount; in tcp_sacktag_one()
1634 tp->retrans_out -= pcount; in tcp_sacktag_one()
1649 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1696 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1697 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1755 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1769 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1849 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1894 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1954 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
2010 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
2012 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
2019 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
2033 state->reord = tp->snd_nxt; in tcp_sacktag_write_queue()
2035 if (!tp->sacked_out) in tcp_sacktag_write_queue()
2045 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
2048 if (!tp->packets_out) in tcp_sacktag_write_queue()
2059 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
2065 if (!tp->undo_marker) in tcp_sacktag_write_queue()
2071 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
2072 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
2110 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
2112 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
2114 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
2116 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
2131 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
2136 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
2158 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
2173 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
2189 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
2190 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
2191 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
2194 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
2196 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) in tcp_sacktag_write_queue()
2199 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
2203 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
2204 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
2205 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
2206 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
2214 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
2218 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
2219 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
2221 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
2222 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
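Without SACK the stack can only infer sacked_out from duplicate ACK counts, so tcp_limit_reno_sacked() caps that inference: at least one hole (lost_out, minimum 1) must stay unexplained, and sacked_out plus holes may never exceed packets_out; hitting the cap is itself a reordering signal. A compact restatement:

    u32 holes = clamp_t(u32, tp->lost_out, 1U, tp->packets_out);

    if (tp->sacked_out + holes > tp->packets_out) {
            tp->sacked_out = tp->packets_out - holes;  /* clamp inference */
            return true;                               /* reordering seen */
    }
    return false;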
2234 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
2236 if (!tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
2239 tp->reordering = min_t(u32, tp->packets_out + addend, in tcp_check_reno_reordering()
2241 tp->reord_seen++; in tcp_check_reno_reordering()
2250 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
2251 u32 prior_sacked = tp->sacked_out; in tcp_add_reno_sack()
2254 tp->sacked_out += num_dupack; in tcp_add_reno_sack()
2256 delivered = tp->sacked_out - prior_sacked; in tcp_add_reno_sack()
2258 tcp_count_delivered(tp, delivered, ece_ack); in tcp_add_reno_sack()
2259 tcp_verify_left_out(tp); in tcp_add_reno_sack()
2267 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
2271 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1), in tcp_remove_reno_sacks()
2273 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
2274 tp->sacked_out = 0; in tcp_remove_reno_sacks()
2276 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
2279 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
2282 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
2284 tp->sacked_out = 0; in tcp_reset_reno_sack()
2287 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
2289 tp->retrans_out = 0; in tcp_clear_retrans()
2290 tp->lost_out = 0; in tcp_clear_retrans()
2291 tp->undo_marker = 0; in tcp_clear_retrans()
2292 tp->undo_retrans = -1; in tcp_clear_retrans()
2293 tp->sacked_out = 0; in tcp_clear_retrans()
2294 tp->rto_stamp = 0; in tcp_clear_retrans()
2295 tp->total_rto = 0; in tcp_clear_retrans()
2296 tp->total_rto_recoveries = 0; in tcp_clear_retrans()
2297 tp->total_rto_time = 0; in tcp_clear_retrans()
2300 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
2302 tp->undo_marker = tp->snd_una; in tcp_init_undo()
2306 tp->undo_retrans = tp->retrans_out; in tcp_init_undo()
2308 if (tp->tlp_high_seq && tp->tlp_retrans) in tcp_init_undo()
2309 tp->undo_retrans++; in tcp_init_undo()
2311 if (!tp->undo_retrans) in tcp_init_undo()
2312 tp->undo_retrans = -1; in tcp_init_undo()
2321 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
2329 tp->sacked_out = 0; in tcp_timeout_mark_lost()
2331 tp->is_sack_reneg = 1; in tcp_timeout_mark_lost()
2332 } else if (tcp_is_reno(tp)) { in tcp_timeout_mark_lost()
2333 tcp_reset_reno_sack(tp); in tcp_timeout_mark_lost()
2340 else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0) in tcp_timeout_mark_lost()
2344 tcp_verify_left_out(tp); in tcp_timeout_mark_lost()
2345 tcp_clear_all_retrans_hints(tp); in tcp_timeout_mark_lost()
2352 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
2361 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
2363 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
2364 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_enter_loss()
2365 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2367 tcp_init_undo(tp); in tcp_enter_loss()
2369 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1); in tcp_enter_loss()
2370 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
2371 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_enter_loss()
2378 tp->sacked_out >= reordering) in tcp_enter_loss()
2379 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
2383 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
2384 tp->tlp_high_seq = 0; in tcp_enter_loss()
2385 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
2391 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) && in tcp_enter_loss()
2410 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2411 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2512 static bool tcp_time_to_recover(const struct tcp_sock *tp) in tcp_time_to_recover() argument
2515 return tp->lost_out != 0; in tcp_time_to_recover()
2518 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before() argument
2520 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_tsopt_ecr_before()
2521 before(tp->rx_opt.rcv_tsecr, when); in tcp_tsopt_ecr_before()
2527 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans() argument
2531 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb)); in tcp_skb_spurious_retrans()
2537 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2539 const struct sock *sk = (const struct sock *)tp; in tcp_packet_delayed()
2542 if (tp->retrans_stamp) in tcp_packet_delayed()
2543 return tcp_tsopt_ecr_before(tp, tp->retrans_stamp); in tcp_packet_delayed()
2557 if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq)) in tcp_packet_delayed()
2588 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2591 if (tp->retrans_out) in tcp_any_retrans_done()
2614 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2621 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2622 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2623 tp->packets_out); in DBGUNDO()
2630 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2631 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2632 tp->packets_out); in DBGUNDO()
2640 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2648 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2649 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2652 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2655 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); in tcp_undo_cwnd_reduction()
2657 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2658 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2659 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2662 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_undo_cwnd_reduction()
2663 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2664 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ in tcp_undo_cwnd_reduction()
2667 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2669 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2674 struct tcp_sock *tp = tcp_sk(sk); in tcp_is_non_sack_preventing_reopen() local
2676 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_is_non_sack_preventing_reopen()
2681 tp->retrans_stamp = 0; in tcp_is_non_sack_preventing_reopen()
2690 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2692 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2706 } else if (tp->rack.reo_wnd_persist) { in tcp_try_undo_recovery()
2707 tp->rack.reo_wnd_persist--; in tcp_try_undo_recovery()
2712 tp->is_sack_reneg = 0; in tcp_try_undo_recovery()
2719 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2721 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2722 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, in tcp_try_undo_dsack()
2723 tp->rack.reo_wnd_persist + 1); in tcp_try_undo_dsack()
2735 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2737 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2748 if (frto_undo || tcp_is_sack(tp)) { in tcp_try_undo_loss()
2750 tp->is_sack_reneg = 0; in tcp_try_undo_loss()
2768 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2770 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2771 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2772 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2773 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_init_cwnd_reduction()
2774 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2775 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2776 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2777 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2782 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2784 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2786 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) in tcp_cwnd_reduction()
2791 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2793 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2794 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2795 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2797 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2804 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); in tcp_cwnd_reduction()
2805 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt); in tcp_cwnd_reduction()
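The dividend/div_u64 block is Proportional Rate Reduction (RFC 6937). While more packets are in flight than ssthresh, the send quota follows sndcnt = ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out, shrinking cwnd toward ssthresh in proportion to delivered data; once in-flight falls to or below ssthresh it switches to careful regrowth bounded by newly delivered packets, and at least one segment is allowed before any PRR transmission has occurred. Roughly:

    int delta = ssthresh - packets_in_flight;

    if (delta < 0)          /* reduction phase, proportional to delivery */
            sndcnt = DIV_ROUND_UP((u64)ssthresh * prr_delivered, prior_cwnd)
                     - prr_out;
    else                    /* PRR-SSRB: regrow, never beyond ssthresh   */
            sndcnt = min(delta, max(prr_delivered - prr_out,
                                    newly_acked_sacked));
    sndcnt = max(sndcnt, prr_out ? 0 : 1);
    cwnd   = packets_in_flight + sndcnt;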
2810 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2816 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && in tcp_end_cwnd_reduction()
2817 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2818 tcp_snd_cwnd_set(tp, tp->snd_ssthresh); in tcp_end_cwnd_reduction()
2819 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_end_cwnd_reduction()
2827 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2829 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2831 tp->undo_marker = 0; in tcp_enter_cwr()
2840 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2843 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2848 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2854 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2856 tcp_verify_left_out(tp); in tcp_try_to_open()
2859 tp->retrans_stamp = 0; in tcp_try_to_open()
2880 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2884 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2886 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache); in tcp_mtup_probe_success()
2889 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val)); in tcp_mtup_probe_success()
2891 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2892 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_mtup_probe_success()
2893 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2914 struct tcp_sock *tp = tcp_sk(sk); in tcp_non_congestion_loss_retransmit() local
2917 tp->high_seq = tp->snd_nxt; in tcp_non_congestion_loss_retransmit()
2918 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_non_congestion_loss_retransmit()
2919 tp->prior_ssthresh = 0; in tcp_non_congestion_loss_retransmit()
2920 tp->undo_marker = 0; in tcp_non_congestion_loss_retransmit()
2932 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2946 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT) in tcp_simple_retransmit()
2956 if (!tp->lost_out) in tcp_simple_retransmit()
2959 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2960 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2962 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2975 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2981 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2988 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2989 tcp_init_undo(tp); in tcp_enter_recovery()
2993 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2999 static void tcp_update_rto_time(struct tcp_sock *tp) in tcp_update_rto_time() argument
3001 if (tp->rto_stamp) { in tcp_update_rto_time()
3002 tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp; in tcp_update_rto_time()
3003 tp->rto_stamp = 0; in tcp_update_rto_time()
3013 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
3014 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
3016 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) && in tcp_process_loss()
3020 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
3028 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
3030 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
3032 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
3038 after(tcp_wnd_end(tp), tp->snd_nxt)) { in tcp_process_loss()
3042 tp->frto = 0; in tcp_process_loss()
3051 if (tcp_is_reno(tp)) { in tcp_process_loss()
3055 if (after(tp->snd_nxt, tp->high_seq) && num_dupack) in tcp_process_loss()
3058 tcp_reset_reno_sack(tp); in tcp_process_loss()
3066 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
3068 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
3079 if (tp->retrans_out) in tcp_try_undo_partial()
3083 tp->retrans_stamp = 0; in tcp_try_undo_partial()
3095 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
3100 if (unlikely(tcp_is_reno(tp))) { in tcp_identify_packet_loss()
3103 u32 prior_retrans = tp->retrans_out; in tcp_identify_packet_loss()
3107 if (prior_retrans > tp->retrans_out) in tcp_identify_packet_loss()
3128 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
3132 if (!tp->packets_out && tp->sacked_out) in tcp_fastretrans_alert()
3133 tp->sacked_out = 0; in tcp_fastretrans_alert()
3138 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
3145 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
3150 WARN_ON(tp->retrans_out != 0 && !tp->syn_data); in tcp_fastretrans_alert()
3151 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
3152 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
3157 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
3164 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3165 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3177 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3187 if (!tcp_time_to_recover(tp)) in tcp_fastretrans_alert()
3198 tcp_update_rto_time(tp); in tcp_fastretrans_alert()
3206 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
3208 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3216 if (!tcp_time_to_recover(tp)) { in tcp_fastretrans_alert()
3224 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
3227 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_fastretrans_alert()
3242 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
3244 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { in tcp_update_rtt_min()
3251 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, in tcp_update_rtt_min()
3259 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
3275 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && in tcp_ack_update_rtt()
3276 tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) in tcp_ack_update_rtt()
3277 seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp, 1); in tcp_ack_update_rtt()
3323 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
3328 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_rearm_rto()
3331 if (!tp->packets_out) { in tcp_rearm_rto()
3358 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3361 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3364 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3404 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3405 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3406 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ in tcp_clean_rtx_queue()
3425 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3427 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3440 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3450 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3455 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3458 } else if (tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3459 tcp_count_delivered(tp, acked_pcount, ece_ack); in tcp_clean_rtx_queue()
3460 if (!tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3461 tcp_rack_advance(tp, sacked, scb->end_seq, in tcp_clean_rtx_queue()
3465 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3467 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3482 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3491 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3492 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3500 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3501 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3510 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); in tcp_clean_rtx_queue()
3511 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); in tcp_clean_rtx_queue()
3514 (tp->snd_una - prior_snd_una) < tp->mss_cache && in tcp_clean_rtx_queue()
3515 sack->rate->prior_delivered + 1 == tp->delivered && in tcp_clean_rtx_queue()
3525 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); in tcp_clean_rtx_queue()
3526 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); in tcp_clean_rtx_queue()
3534 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3538 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3556 TCP_SKB_CB(skb)->seq : tp->snd_una) - in tcp_clean_rtx_queue()
3559 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, in tcp_clean_rtx_queue()
3572 sample.in_flight = tp->mss_cache * in tcp_clean_rtx_queue()
3573 (tp->delivered - sack->rate->prior_delivered); in tcp_clean_rtx_queue()
3578 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3579 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3580 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3581 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3583 if (tp->lost_out) { in tcp_clean_rtx_queue()
3585 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3586 tp->lost_out = 0; in tcp_clean_rtx_queue()
3588 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3590 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3591 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3593 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3595 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3596 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3607 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3612 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3677 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3681 return after(ack, tp->snd_una) || in tcp_may_update_window()
3682 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3683 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin)); in tcp_may_update_window()
3686 static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack) in tcp_snd_sne_update() argument
3694 ao = rcu_dereference_protected(tp->ao_info, in tcp_snd_sne_update()
3695 lockdep_sock_is_held((struct sock *)tp)); in tcp_snd_sne_update()
3696 if (ao && ack < tp->snd_una) { in tcp_snd_sne_update()
3698 trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne); in tcp_snd_sne_update()
3704 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3706 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3708 sock_owned_by_me((struct sock *)tp); in tcp_snd_una_update()
3709 tp->bytes_acked += delta; in tcp_snd_una_update()
3710 tcp_snd_sne_update(tp, ack); in tcp_snd_una_update()
3711 tp->snd_una = ack; in tcp_snd_una_update()
3714 static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_sne_update() argument
3722 ao = rcu_dereference_protected(tp->ao_info, in tcp_rcv_sne_update()
3723 lockdep_sock_is_held((struct sock *)tp)); in tcp_rcv_sne_update()
3724 if (ao && seq < tp->rcv_nxt) { in tcp_rcv_sne_update()
3726 trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne); in tcp_rcv_sne_update()
3732 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3734 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3736 sock_owned_by_me((struct sock *)tp); in tcp_rcv_nxt_update()
3737 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3738 tcp_rcv_sne_update(tp, seq); in tcp_rcv_nxt_update()
3739 WRITE_ONCE(tp->rcv_nxt, seq); in tcp_rcv_nxt_update()
3750 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3755 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3757 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3759 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3761 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3762 tp->snd_wnd = nwin; in tcp_ack_update_window()
3767 tp->pred_flags = 0; in tcp_ack_update_window()
3773 if (nwin > tp->max_window) { in tcp_ack_update_window()
3774 tp->max_window = nwin; in tcp_ack_update_window()
3780 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3829 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_ack_reflect_ect() local
3833 flags = tcp_accecn_reflector_flags(tp->syn_ect_rcv); in tcp_send_ack_reflect_ect()
3834 __tcp_send_ack(sk, tp->rcv_nxt, flags); in tcp_send_ack_reflect_ect()
3840 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3847 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3872 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3874 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3875 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); in tcp_store_ts_recent()
3878 static int __tcp_replace_ts_recent(struct tcp_sock *tp, s32 tstamp_delta) in __tcp_replace_ts_recent() argument
3880 tcp_store_ts_recent(tp); in __tcp_replace_ts_recent()
3884 static int tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3888 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3896 if (tcp_paws_check(&tp->rx_opt, 0)) { in tcp_replace_ts_recent()
3897 delta = tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent; in tcp_replace_ts_recent()
3898 return __tcp_replace_ts_recent(tp, delta); in tcp_replace_ts_recent()
3910 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3912 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3915 if (!tp->tlp_retrans) { in tcp_process_tlp_ack()
3917 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3920 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3921 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3934 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3963 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3971 if (after(tp->snd_nxt, tp->high_seq)) in tcp_xmit_recovery()
3973 tp->frto = 0; in tcp_xmit_recovery()
3983 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3986 delivered = tp->delivered - prior_delivered; in tcp_newly_delivered()
3990 if (tcp_ecn_mode_rfc3168(tp)) in tcp_newly_delivered()
4002 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
4005 u32 prior_snd_una = tp->snd_una; in tcp_ack()
4006 bool is_sack_reneg = tp->is_sack_reneg; in tcp_ack()
4010 int prior_packets = tp->packets_out; in tcp_ack()
4011 u32 delivered = tp->delivered; in tcp_ack()
4012 u32 lost = tp->lost; in tcp_ack()
4032 max_window = min_t(u64, tp->max_window, tp->bytes_acked); in tcp_ack()
4045 if (after(ack, tp->snd_nxt)) in tcp_ack()
4054 if (tp->tcp_clean_acked) in tcp_ack()
4055 tp->tcp_clean_acked(sk, ack); in tcp_ack()
4059 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; in tcp_ack()
4060 rs.prior_in_flight = tcp_packets_in_flight(tp); in tcp_ack()
4066 flag |= tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
4074 tcp_update_wl(tp, ack_seq); in tcp_ack()
4075 tcp_snd_una_update(tp, ack); in tcp_ack()
4091 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) in tcp_ack()
4095 tcp_count_delivered(tp, sack_state.sack_delivered, in tcp_ack()
4114 tp->rcv_tstamp = tcp_jiffies32; in tcp_ack()
4124 if (tcp_ecn_mode_accecn(tp)) in tcp_ack()
4126 tp->delivered - delivered, in tcp_ack()
4132 if (tp->tlp_high_seq) in tcp_ack()
4156 lost = tp->lost - lost; /* freshly marked lost */ in tcp_ack()
4164 if (tcp_ecn_mode_accecn(tp)) in tcp_ack()
4166 tp->delivered - delivered, in tcp_ack()
4182 if (tp->tlp_high_seq) in tcp_ack()
4422 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
4428 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
4430 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
4433 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
4435 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
4446 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
4452 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
4453 tp->rx_opt.accecn = 0; in tcp_fast_parse_options()
4455 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
4457 if (tcp_parse_aligned_timestamp(tp, th)) { in tcp_fast_parse_options()
4458 tp->rx_opt.accecn = 0; in tcp_fast_parse_options()
4463 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
4464 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
4465 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
4567 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack_check() local
4578 if (seq != tp->rcv_nxt) in tcp_disordered_ack_check()
4579 return before(seq, tp->rcv_nxt) ? in tcp_disordered_ack_check()
4584 if (ack != tp->snd_una) in tcp_disordered_ack_check()
4588 if (tcp_may_update_window(tp, ack, seq, ntohs(th->window) << in tcp_disordered_ack_check()
4589 tp->rx_opt.snd_wscale)) in tcp_disordered_ack_check()
4593 if ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > in tcp_disordered_ack_check()
4616 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sequence() local
4618 if (before(end_seq, tp->rcv_wup)) in tcp_sequence()
4621 if (after(end_seq, tp->rcv_nxt + tcp_receive_window(tp))) { in tcp_sequence()
4622 if (after(seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_sequence()
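tcp_sequence() is the RFC 793 acceptance test: a segment is processable only if it overlaps the receive window [rcv_wup, rcv_nxt + rcv_wnd]; the nested after() check distinguishes a segment that merely extends past the right edge (trimmed later) from one that starts entirely beyond it (dropped). A condensed boolean form:

    /* Accept iff the segment overlaps the advertised receive window. */
    static bool tcp_seq_acceptable(const struct tcp_sock *tp,
                                   u32 seq, u32 end_seq)
    {
            return !before(end_seq, tp->rcv_wup) &&
                   !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
    }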
4693 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4743 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_fin()
4744 if (tcp_is_sack(tp)) in tcp_fin()
4745 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4774 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4776 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_dsack_set()
4779 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4786 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4787 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4788 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4794 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4796 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4799 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4827 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4830 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4834 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_send_dupack()
4838 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4839 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4850 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4853 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4859 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4866 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4867 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4878 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_compress_send_ack() local
4880 if (!tp->compressed_ack) in tcp_sack_compress_send_ack()
4883 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_sack_compress_send_ack()
4891 tp->compressed_ack - 1); in tcp_sack_compress_send_ack()
4893 tp->compressed_ack = 0; in tcp_sack_compress_send_ack()
4905 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4906 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4907 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4921 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4937 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4947 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4952 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4954 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4955 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4959 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_sack_remove()
4960 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4966 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4970 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4974 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4981 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
5061 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
5062 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
5067 p = rb_first(&tp->out_of_order_queue); in tcp_ofo_queue()
5070 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
5081 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
5083 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { in tcp_ofo_queue()
5090 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
5150 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
5168 tp->pred_flags = 0; in tcp_data_queue_ofo()
5171 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_data_queue_ofo()
5176 p = &tp->out_of_order_queue.rb_node; in tcp_data_queue_ofo()
5177 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue_ofo()
5179 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
5180 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
5181 tp->selective_acks[0].start_seq = seq; in tcp_data_queue_ofo()
5182 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
5185 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5186 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
5193 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
5199 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5206 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { in tcp_data_queue_ofo()
5207 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
5240 &tp->out_of_order_queue); in tcp_data_queue_ofo()
5259 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5271 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5279 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
5282 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5289 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5296 tcp_rcvbuf_grow(sk, tp->rcvq_space.space); in tcp_data_queue_ofo()
5376 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
5397 tp->rx_opt.dsack = 0; in tcp_data_queue()
5403 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
5404 if (tcp_receive_window(tp) == 0) { in tcp_data_queue()
5443 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue()
5449 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_data_queue()
5453 if (tp->rx_opt.num_sacks) in tcp_data_queue()
5454 tcp_sack_remove(tp); in tcp_data_queue()
5465 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
5482 tp->rcv_nxt + tcp_receive_window(tp))) { in tcp_data_queue()
5487 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
5489 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
5494 if (!tcp_receive_window(tp)) { in tcp_data_queue()
5666 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
5671 skb = skb_rb_first(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5674 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5693 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
5726 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5731 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_prune_ofo_queue()
5735 node = &tp->ooo_last_skb->rbnode; in tcp_prune_ofo_queue()
5745 rb_erase(node, &tp->out_of_order_queue); in tcp_prune_ofo_queue()
5748 tp->ooo_last_skb = rb_to_skb(prev); in tcp_prune_ofo_queue()
5765 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
5766 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
5780 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5801 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
5821 tp->pred_flags = 0; in tcp_prune_queue()
5827 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5854 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)) in tcp_should_expand_sndbuf()
5862 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5866 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_new_space()
5905 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5911 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5917 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || in __tcp_ack_snd_check()
5918 __tcp_select_window(sk) >= tp->rcv_wnd)) || in __tcp_ack_snd_check()
5936 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in __tcp_ack_snd_check()
5941 if (!tcp_is_sack(tp) || in __tcp_ack_snd_check()
5942 tp->compressed_ack >= READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_nr)) in __tcp_ack_snd_check()
5945 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { in __tcp_ack_snd_check()
5946 tp->compressed_ack_rcv_nxt = tp->rcv_nxt; in __tcp_ack_snd_check()
5947 tp->dup_ack_counter = 0; in __tcp_ack_snd_check()
5949 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) { in __tcp_ack_snd_check()
5950 tp->dup_ack_counter++; in __tcp_ack_snd_check()
5953 tp->compressed_ack++; in __tcp_ack_snd_check()
5954 if (hrtimer_is_queued(&tp->compressed_ack_timer)) in __tcp_ack_snd_check()
5961 rtt = tp->rcv_rtt_est.rtt_us; in __tcp_ack_snd_check()
5962 if (tp->srtt_us && tp->srtt_us < rtt) in __tcp_ack_snd_check()
5963 rtt = tp->srtt_us; in __tcp_ack_snd_check()
5975 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), in __tcp_ack_snd_check()
6001 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
6009 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
6022 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
6026 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
6047 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
6048 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
6050 tp->copied_seq++; in tcp_check_urg()
6051 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
6057 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET); in tcp_check_urg()
6058 WRITE_ONCE(tp->urg_seq, ptr); in tcp_check_urg()
6061 tp->pred_flags = 0; in tcp_check_urg()
6067 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
6074 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) { in tcp_urg()
6075 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
6083 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp); in tcp_urg()
6100 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
6102 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && in tcp_reset_check()
6113 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
6118 if (!tcp_fast_parse_options(sock_net(sk), skb, th, tp) || in tcp_validate_incoming()
6119 !tp->rx_opt.saw_tstamp || in tcp_validate_incoming()
6120 tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW)) in tcp_validate_incoming()
6142 &tp->last_oow_ack_time)) in tcp_validate_incoming()
6166 &tp->last_oow_ack_time)) in tcp_validate_incoming()
6185 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || in tcp_validate_incoming()
6189 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { in tcp_validate_incoming()
6190 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_validate_incoming()
6194 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; in tcp_validate_incoming()
6209 if (tp->syn_fastopen && !tp->data_segs_in && in tcp_validate_incoming()
6223 if (tcp_ecn_mode_accecn(tp)) { in tcp_validate_incoming()
6225 if (tp->rx_opt.accecn && in tcp_validate_incoming()
6226 tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) { in tcp_validate_incoming()
6227 u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn); in tcp_validate_incoming()
6229 tcp_accecn_saw_opt_fail_recv(tp, saw_opt); in tcp_validate_incoming()
6235 TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && in tcp_validate_incoming()
6236 TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) in tcp_validate_incoming()
6289 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
6295 tcp_mstamp_refresh(tp); in tcp_rcv_established()
6313 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
6314 tp->rx_opt.accecn = 0; in tcp_rcv_established()
6325 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
6326 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
6327 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
6328 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
6340 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
6343 delta = tp->rx_opt.rcv_tsval - in tcp_rcv_established()
6344 tp->rx_opt.ts_recent; in tcp_rcv_established()
6365 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
6366 flag |= __tcp_replace_ts_recent(tp, in tcp_rcv_established()
6381 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_established()
6396 tp->rcv_nxt + tcp_receive_window(tp))) in tcp_rcv_established()
6408 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
6409 flag |= __tcp_replace_ts_recent(tp, in tcp_rcv_established()
6425 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
6432 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_established()
6494 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_transfer() local
6506 if (tp->total_retrans > 1 && tp->undo_marker) in tcp_init_transfer()
6507 tcp_snd_cwnd_set(tp, 1); in tcp_init_transfer()
6509 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk))); in tcp_init_transfer()
6510 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_transfer()
6521 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
6539 tp->lsndtime = tcp_jiffies32; in tcp_finish_connect()
6542 tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
6544 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
6545 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
6547 tp->pred_flags = 0; in tcp_finish_connect()
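
Lines 6544-6547 show why the fast path is armed at connect time only without window scaling: pred_flags embeds the raw 16-bit window field, and a SYN-ACK's window is never scaled, so the prediction is immediately valid only when snd_wscale is zero; otherwise pred_flags stays 0 until tcp_fast_path_on() can supply snd_wnd >> snd_wscale. A sketch of the scaled variant (constants as in the header-prediction sketch above; predicted_word is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_ACK 0x00100000u

    /* With scaling in effect, the header's window field carries the
     * scaled-down value, so the predicted word must shift snd_wnd back
     * down, mirroring tcp_fast_path_on(). */
    static uint32_t predicted_word(unsigned hdr_len, uint32_t snd_wnd,
                                   unsigned snd_wscale)
    {
        return ((uint32_t)hdr_len << 26) | FLAG_ACK | (snd_wnd >> snd_wscale);
    }

    int main(void)
    {
        /* A 64 KiB send window with wscale 7 predicts a raw field of 512. */
        printf("0x%08x\n", predicted_word(32, 65536, 7));
        return 0;
    }
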
6553 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
6554 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
6555 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
6558 if (mss == READ_ONCE(tp->rx_opt.user_mss)) { in tcp_rcv_fastopen_synack()
6568 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
6571 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
6578 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
6583 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
6589 if (tp->total_retrans) in tcp_rcv_fastopen_synack()
6590 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED; in tcp_rcv_fastopen_synack()
6592 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED; in tcp_rcv_fastopen_synack()
6600 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
6601 if (tp->syn_data_acked) { in tcp_rcv_fastopen_synack()
6604 if (tp->delivered > 1) in tcp_rcv_fastopen_synack()
6605 --tp->delivered; in tcp_rcv_fastopen_synack()
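
The tcp_rcv_fastopen_synack() matches check whether data sent on the SYN was accepted: if the retransmit queue still holds the SYN data after the SYN-ACK is processed (6589-6592), the client records why TFO failed and retransmits. A condensed decision sketch (the snd_una-based test and the names are a simplification of the in-kernel skb-based check):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(b - a) < 0; }

    enum tfo_result { TFO_OK, TFO_SYN_RETRANSMITTED, TFO_DATA_NOT_ACKED };

    /* SYN data counts as delivered only if the SYN-ACK's ack advanced
     * snd_una past its end; otherwise record whether the SYN itself had
     * to be retransmitted (cookie likely rejected) or only the data was
     * dropped. */
    static enum tfo_result check_syn_data(uint32_t data_end_seq,
                                          uint32_t snd_una,
                                          uint32_t total_retrans)
    {
        if (!seq_after(data_end_seq, snd_una))
            return TFO_OK;
        return total_retrans ? TFO_SYN_RETRANSMITTED : TFO_DATA_NOT_ACKED;
    }

    int main(void)
    {
        printf("%d\n", check_syn_data(1101, 1101, 0)); /* acked: TFO_OK */
        printf("%d\n", check_syn_data(1101, 1, 0));    /* TFO_DATA_NOT_ACKED */
        return 0;
    }
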
6613 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn() argument
6617 if (tp->syn_smc && !tp->rx_opt.smc_ok) in smc_check_reset_syn()
6618 tp->syn_smc = 0; in smc_check_reset_syn()
6625 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_spurious_syn() local
6632 syn_stamp = tp->retrans_stamp; in tcp_try_undo_spurious_syn()
6633 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp && in tcp_try_undo_spurious_syn()
6634 syn_stamp == tp->rx_opt.rcv_tsecr) in tcp_try_undo_spurious_syn()
6635 tp->undo_marker = 0; in tcp_try_undo_spurious_syn()
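
Lines 6632-6635 implement Eifel-style undo for the handshake: if the peer's timestamp echo equals retrans_stamp (the timestamp of the original SYN), any SYN retransmission was spurious and the undo marker is cleared. A one-function sketch (syn_retx_was_spurious is an illustrative name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors tcp_try_undo_spurious_syn(): an echo of the original SYN's
     * timestamp proves the peer saw the first transmission, so the
     * timeout that caused the retransmission was spurious. */
    static bool syn_retx_was_spurious(uint32_t undo_marker,
                                      uint32_t retrans_stamp,
                                      bool saw_tstamp, uint32_t rcv_tsecr)
    {
        return undo_marker && retrans_stamp && saw_tstamp &&
               retrans_stamp == rcv_tsecr;
    }

    int main(void)
    {
        printf("%d\n", syn_retx_was_spurious(1, 4242, true, 4242)); /* 1 */
        return 0;
    }
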
6642 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
6644 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
6648 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6649 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
6650 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
6661 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
6662 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_synsent_state_process()
6671 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
6672 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
6673 tcp_time_stamp_ts(tp))) { in tcp_rcv_synsent_state_process()
6713 if (tcp_ecn_mode_any(tp)) in tcp_rcv_synsent_state_process()
6717 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
6724 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6725 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6730 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6732 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
6733 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
6734 WRITE_ONCE(tp->window_clamp, in tcp_rcv_synsent_state_process()
6735 min(tp->window_clamp, 65535U)); in tcp_rcv_synsent_state_process()
6738 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6739 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6740 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6742 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
6743 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6745 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6754 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6756 smc_check_reset_syn(tp); in tcp_rcv_synsent_state_process()
6762 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
6787 tcp_send_ack_reflect_ect(sk, tcp_ecn_mode_accecn(tp)); in tcp_rcv_synsent_state_process()
6804 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
6805 tcp_paws_reject(&tp->rx_opt, 0)) { in tcp_rcv_synsent_state_process()
6817 ao = rcu_dereference_protected(tp->ao_info, in tcp_rcv_synsent_state_process()
6826 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6827 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6828 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6829 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6832 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6835 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6836 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6837 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6842 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6843 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
6844 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
6846 tcp_ecn_rcv_syn(tp, th, skb); in tcp_rcv_synsent_state_process()
6875 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6876 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6881 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6882 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
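
Lines 6661-6662 apply the classic SYN-SENT acceptability test: the SYN-ACK's ack must fall in (snd_una, snd_nxt], i.e. actually cover our SYN; 6671-6673 additionally reject timestamp echoes that we could not have sent. A sketch of both tests (names are illustrative; tsval_now stands in for the current tcp_time_stamp):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool seq_after(uint32_t a, uint32_t b)  { return (int32_t)(b - a) < 0; }
    static bool seq_before(uint32_t a, uint32_t b) { return seq_after(b, a); }

    /* RFC 793: in SYN-SENT, the incoming ack must lie in (snd_una, snd_nxt]. */
    static bool ack_covers_syn(uint32_t ack_seq, uint32_t snd_una,
                               uint32_t snd_nxt)
    {
        return seq_after(ack_seq, snd_una) && !seq_after(ack_seq, snd_nxt);
    }

    /* A legitimate echo must lie between the SYN's timestamp and now,
     * matching the between(rcv_tsecr, retrans_stamp, now) test. */
    static bool tsecr_plausible(uint32_t rcv_tsecr, uint32_t syn_tsval,
                                uint32_t tsval_now)
    {
        return !seq_before(rcv_tsecr, syn_tsval) &&
               !seq_after(rcv_tsecr, tsval_now);
    }

    int main(void)
    {
        printf("%d\n", ack_covers_syn(101, 100, 101));     /* 1: acks our SYN */
        printf("%d\n", ack_covers_syn(100, 100, 101));     /* 0: old ack */
        printf("%d\n", tsecr_plausible(5000, 4990, 5010)); /* 1 */
        return 0;
    }
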
6889 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synrecv_state_fastopen() local
6895 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out) in tcp_rcv_synrecv_state_fastopen()
6898 tcp_update_rto_time(tp); in tcp_rcv_synrecv_state_fastopen()
6913 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_synrecv_state_fastopen()
6938 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6979 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6980 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6992 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6993 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6994 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_state_process()
7036 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ in tcp_rcv_state_process()
7037 if (!tp->srtt_us) in tcp_rcv_state_process()
7040 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
7041 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
7047 tp->retrans_stamp = 0; in tcp_rcv_state_process()
7050 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_state_process()
7064 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
7065 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
7066 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
7072 tp->lsndtime = tcp_jiffies32; in tcp_rcv_state_process()
7075 if (tcp_ecn_mode_accecn(tp)) in tcp_rcv_state_process()
7076 tcp_accecn_third_ack(sk, skb, tp->syn_ect_snt); in tcp_rcv_state_process()
7077 tcp_fast_path_on(tp); in tcp_rcv_state_process()
7089 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
7103 if (READ_ONCE(tp->linger2) < 0) { in tcp_rcv_state_process()
7109 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
7111 if (tp->syn_fastopen && th->fin) in tcp_rcv_state_process()
7137 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
7144 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
7160 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_rcv_state_process()
7177 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
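
The tcp_rcv_state_process() matches at 7089, 7137 and 7144 all gate close-side state transitions on snd_una == write_seq, i.e. on our FIN being fully acknowledged. A condensed state-machine fragment (the enum and on_ack are illustrative; the kernel drives this through tcp_set_state()):

    #include <stdint.h>
    #include <stdio.h>

    enum st { FIN_WAIT1, FIN_WAIT2, CLOSING, TIME_WAIT, LAST_ACK, CLOSED };

    /* Until snd_una catches up with write_seq our FIN is still in flight,
     * so the socket stays put; once acked, FIN-WAIT-1 moves to FIN-WAIT-2,
     * CLOSING to TIME-WAIT, and LAST-ACK to CLOSED. */
    static enum st on_ack(enum st s, uint32_t snd_una, uint32_t write_seq)
    {
        if (snd_una != write_seq)
            return s;
        switch (s) {
        case FIN_WAIT1: return FIN_WAIT2;
        case CLOSING:   return TIME_WAIT;
        case LAST_ACK:  return CLOSED;
        default:        return s;
        }
    }

    int main(void)
    {
        printf("%d\n", on_ack(FIN_WAIT1, 200, 200)); /* -> FIN_WAIT2 */
        printf("%d\n", on_ack(FIN_WAIT1, 150, 200)); /* FIN unacked, stays */
        return 0;
    }
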
7383 struct tcp_sock *tp = tcp_sk(sk); in tcp_get_syncookie_mss() local
7398 mss = tcp_parse_mss_option(th, READ_ONCE(tp->rx_opt.user_mss)); in tcp_get_syncookie_mss()
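
tcp_get_syncookie_mss() falls back to parsing the MSS option straight off the SYN (7398), clamped by any user-set limit. A minimal option-walk sketch in the spirit of tcp_parse_mss_option() (parse_mss is an illustrative name; opts/len cover only the bytes after the fixed 20-byte header):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL 0
    #define TCPOPT_NOP 1
    #define TCPOPT_MSS 2

    static uint16_t parse_mss(const uint8_t *opts, size_t len, uint16_t user_mss)
    {
        size_t i = 0;

        while (i < len) {
            uint8_t kind = opts[i];

            if (kind == TCPOPT_EOL)
                break;
            if (kind == TCPOPT_NOP) {        /* single-byte padding */
                i++;
                continue;
            }
            if (i + 1 >= len || opts[i + 1] < 2 || i + opts[i + 1] > len)
                break;                       /* truncated or silly option */
            if (kind == TCPOPT_MSS && opts[i + 1] == 4) {
                uint16_t mss = (uint16_t)((opts[i + 2] << 8) | opts[i + 3]);

                /* A user-set maximum (TCP_MAXSEG) only ever lowers it. */
                if (user_mss && user_mss < mss)
                    mss = user_mss;
                return mss;
            }
            i += opts[i + 1];                /* skip over this option */
        }
        return 0;
    }

    int main(void)
    {
        const uint8_t opts[] = { TCPOPT_MSS, 4, 0x05, 0xb4 }; /* MSS 1460 */

        printf("%u %u\n", parse_mss(opts, sizeof(opts), 0),
               parse_mss(opts, sizeof(opts), 1200)); /* 1460 1200 */
        return 0;
    }
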
7412 const struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
7463 tmp_opt.user_mss = READ_ONCE(tp->rx_opt.user_mss); in tcp_conn_request()