xref: /linux/net/ipv4/tcp_recovery.c (revision e636f8b0104d6622aaaed6aa5ef17dfbf165bc51)
#include <linux/tcp.h>
#include <net/tcp.h>

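/* Controlled by the net.ipv4.tcp_recovery sysctl: the TCP_RACK_LOST_RETRANS
 * bit enables RACK-based detection of lost retransmissions.
 */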
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;

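/* Mark @skb lost; if it had been retransmitted, also drop its retransmission
 * accounting, since that retransmission is now presumed lost as well.
 */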
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
	}
}

/* Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * The current version is only used after recovery starts but can be
 * easily extended to detect the first loss.
 */
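/* Illustrative example (hypothetical timings): P1 is sent at t = 0 and P2 at
 * t = 12ms, and only P2 is SACKed, so rack.mstamp records P2's send time.
 * With reo_wnd = 4ms, P1 is marked lost: it was sent more than reo_wnd before
 * the most recently delivered packet yet has not been (s)acked itself.
 */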
static void tcp_rack_detect_loss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000 us). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 *
	 * TODO: measure and adapt to the observed reordering delay, and
	 * use a timer to retransmit like the delayed early retransmit.
	 */
	reo_wnd = 1000;
	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
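	/* For example, if reordering has been observed and min_rtt is 20ms,
	 * reo_wnd becomes max(20ms / 4, 1ms) = 5ms; without observed
	 * reordering it stays at the 1ms (1000 us) floor.
	 */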

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
			if (skb_mstamp_us_delta(&tp->rack.mstamp,
						&skb->skb_mstamp) <= reo_wnd)
				continue;

			/* skb is lost if a packet sent later was sacked */
			tcp_rack_mark_skb_lost(sk, skb);
		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially so stop early
			 * because the rest were all sent after rack.mstamp
			 */
			break;
		}
	}
}

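/* Scan for RACK-detected losses. The scan runs only once the sender is in the
 * Recovery or Loss state, and only if rack.mstamp has advanced since the last
 * scan.
 */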
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
		return;
	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk);
}

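/* Expected to be called whenever a packet is newly (s)acked, with that
 * packet's most recent (re)transmit time (e.g. from the SACK and ACK
 * processing paths in tcp_input.c).
 */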
/* Record the most recently (re)sent time among the (s)acked packets */
void tcp_rack_advance(struct tcp_sock *tp,
		      const struct skb_mstamp *xmit_time, u8 sacked)
{
	if (tp->rack.mstamp.v64 &&
	    !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
		return;

	if (sacked & TCPCB_RETRANS) {
		struct skb_mstamp now;

		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
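		/* For example (illustrative numbers): with min_rtt = 20ms, a
		 * (s)ack arriving only 8ms after this retransmission was sent
		 * cannot be for the retransmission itself, so rack.mstamp is
		 * not advanced from this ambiguous timestamp.
		 */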
		skb_mstamp_get(&now);
		if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
			return;
	}

	tp->rack.mstamp = *xmit_time;
	tp->rack.advanced = 1;
}