// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive during
		 * recovery, or when starting recovery via the DUPACK
		 * threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
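
/* Example (illustrative, hypothetical numbers): with min_rtt = 20 ms,
 * srtt = 24 ms and reo_wnd_steps = 1, the reordering window is
 * min(20 ms / 4 * 1, 24 ms) = 5 ms. If DSACKs later raise
 * reo_wnd_steps to 3, the window grows to 15 ms, still capped by srtt.
 */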

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
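
/* Example walk-through (illustrative, hypothetical numbers): assume
 * rack.rtt_us = 40 ms and reo_wnd = 5 ms. An skb (re)sent 50 ms ago has
 * remaining = 40 + 5 - 50 = -5 ms and is marked lost immediately; one
 * sent 42 ms ago has remaining = 3 ms, so it is left alone and
 * *reo_timeout becomes 3 ms, which tcp_rack_mark_lost() below uses to
 * arm the REO timer (plus TCP_TIMEOUT_MIN). The scan walks
 * tsorted_sent_queue from the least recently (re)sent skb and stops at
 * the first one that was not sent before the rack.mstamp/rack.end_seq
 * reference point.
 */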

void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
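
/* Example (illustrative, hypothetical numbers): if rack.mstamp currently
 * points at a packet sent at t = 100 ms and an incoming ACK (s)acks a
 * packet sent at t = 120 ms with a measured RTT of 40 ms (>= min_rtt),
 * rack.mstamp/rack.end_seq advance to that packet, rack.rtt_us becomes
 * 40 ms, and the rack.advanced flag lets the next tcp_rack_mark_lost()
 * call scan for lost packets. A sacked retransmission whose measured RTT
 * is below min_rtt is skipped as ambiguous, since the (s)ack may be for
 * the original transmission.
 */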

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK's reo_wnd based on DSACK and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (this accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the new DSACK
 * we are reacting to is (approximately) due to a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than as an
 * absolute value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
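
/* Example (illustrative, hypothetical numbers): with min_rtt = 20 ms,
 * reo_wnd_steps starts at 1, i.e. a 5 ms window. A DSACK arriving at
 * least one round trip after the last adjustment bumps reo_wnd_steps to
 * 2 (10 ms, still capped by srtt) and sets reo_wnd_persist to 16; after
 * 16 successful recoveries without a new DSACK, reo_wnd_steps falls back
 * to 1.
 */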

/* RFC6582 NewReno recovery for non-SACK connections. It simply
 * retransmits the next unacked packet upon receiving either
 * a) three or more DUPACKs to start fast recovery, or
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_skb_mark_lost_uncond_verify(tp, skb);
	}
}
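
/* Example (illustrative, hypothetical numbers) for tcp_newreno_mark_lost():
 * on a non-SACK connection with tp->reordering = 3, the third DUPACK makes
 * sacked_out reach the threshold before recovery starts. If the head of
 * the rtx queue is a 4-segment GSO skb, tcp_fragment() first splits off a
 * single MSS, and only that first segment is marked lost and subsequently
 * retransmitted; later ACKs of new data during recovery repeat this for
 * the new queue head.
 */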