// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive
		 * during recovery or when starting recovery via the
		 * DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more resilient to reordering, allow a settling delay of
	 * min_rtt/4. Use min_rtt instead of the smoothed RTT because
	 * reordering is often a path property and less related to queuing
	 * or delayed ACKs. Upon receiving DSACKs, linearly increase the
	 * window up to the smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}

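/* Worked example for tcp_rack_reo_wnd(), with illustrative numbers: if
 * tcp_min_rtt(tp) == 40000 usec, rack.reo_wnd_steps == 1 and srtt_us ==
 * 400000 (a 50 msec srtt, stored left-shifted by 3), the result is
 * min((40000 >> 2) * 1, 400000 >> 3) = min(10000, 50000) = 10000 usec,
 * i.e. min_rtt/4. Each DSACK-driven step adds another min_rtt/4, capped
 * at the smoothed RTT.
 */
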
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

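/* Worked example for tcp_rack_skb_timeout(), with illustrative numbers:
 * if the most recently delivered packet measured rack.rtt_us == 10000
 * usec, reo_wnd == 2500 usec, and the skb was (re)sent 11000 usec before
 * tp->tcp_mstamp, then remaining = 10000 + 2500 - 11000 = 1500 usec:
 * the skb is not yet lost, but will be deemed lost if still unacked
 * 1500 usec from now.
 */
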
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

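/* Hypothetical walk-through of tcp_rack_detect_loss(): P1 then P2 are
 * sent, and P2 is SACKed, so rack.mstamp/end_seq record P2's send time
 * and rack.rtt_us its RTT (say 10000 usec). Scanning the time-sorted
 * list, P1 qualifies because it was sent before P2; with reo_wnd == 2500
 * usec, P1 is marked lost if it was sent more than 12500 usec ago,
 * otherwise the residual wait feeds *reo_timeout. The scan stops at the
 * first skb not sent before the most recently delivered one.
 */
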
bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}

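/* Timer arithmetic sketch (illustrative numbers): if the detection pass
 * leaves a residual wait of 1500 usec, the reordering timer is armed
 * usecs_to_jiffies(1500 + TCP_TIMEOUT_MIN_US) jiffies ahead; adding the
 * small TCP_TIMEOUT_MIN_US floor before the jiffies conversion keeps the
 * minimum expiry sane regardless of HZ. inet_csk_reset_xmit_timer()
 * additionally caps the expiry at icsk_rto.
 */
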
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

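/* Ambiguity-guard example (illustrative numbers): with min_rtt == 50000
 * usec, a retransmitted skb is SACKed only 3000 usec after the
 * retransmission. Since rtt_us < min_rtt, the SACK most likely refers to
 * the original transmission; taking the 3000 usec sample would corrupt
 * rack.rtt_us, so the RACK state is left untouched. A retransmit whose
 * sample is at least min_rtt is accepted normally.
 */
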
/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

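/* Hypothetical timer firing: the reordering timer expires with two
 * overdue packets on the time-sorted list. tcp_rack_detect_loss() marks
 * both lost, so tcp_packets_in_flight() shrinks; recovery is entered if
 * not already active, cwnd reduction is applied for the tp->lost - lost
 * newly lost packets (unless the congestion module installs its own
 * cong_control hook), and the retransmit queue is flushed.
 */
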
/* Updates the RACK reo_wnd based on DSACK and the no. of recoveries.
 *
 * If a DSACK is received that seems like it may have been due to reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the new DSACK
 * being reacted to is (approximately) due to a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than as an
 * absolute value, to account for changes in the rtt.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard the DSACK if an RTT has not passed since we adjusted
	 * reo_wnd.
	 */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if an update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}

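/* Illustrative reo_wnd dynamics: starting at reo_wnd_steps == 1
 * (min_rtt/4), each DSACK that survives the once-per-RTT filter bumps
 * the window to 2*min_rtt/4, then 3*min_rtt/4, and so on (capped at
 * 0xFF steps, with the resulting window capped at srtt inside
 * tcp_rack_reo_wnd()). Each bump re-arms reo_wnd_persist to
 * TCP_RACK_RECOVERY_THRESH (16); once that many successful recoveries
 * pass without a new DSACK, the steps reset to the default of 1.
 */
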
/* RFC6582 NewReno recovery for non-SACK connections. It simply
 * retransmits the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
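
/* Hypothetical trigger: a non-SACK flow with tp->reordering == 3 sees a
 * third DUPACK, so the emulated sacked_out reaches the threshold while
 * still below CA_Recovery; the head of the retransmit queue is split
 * down to one MSS if it covers several and marked lost, priming the
 * fast retransmit. During recovery, each ACK that advances snd_una
 * marks the new head the same way, one packet per such ACK.
 */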