// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
#include <net/rstreason.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
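
/* A hedged worked example of the clamp above (hypothetical values, not
 * taken from this file): with TCP_USER_TIMEOUT set to 5000 ms and 4200 ms
 * already elapsed since retrans_stamp, remaining = 800 ms, so the next
 * RTO is clamped to min(icsk_rto, msecs_to_jiffies(800)). Once elapsed
 * exceeds the user timeout, the function returns 1 jiffy so the timer
 * fires, and the connection is torn down, as soon as possible.
 */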

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}
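
/* Illustration with hypothetical numbers: if TCP_USER_TIMEOUT is 10000 ms
 * and roughly 9950 ms worth of jiffies have elapsed since the first
 * zero-window probe, the requested probe interval 'when' is clamped down
 * to the ~50 ms that remain, but never below TCP_TIMEOUT_MIN, so the
 * timer still fires sanely even when the budget is nearly spent.
 */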

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under severe memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it.
	 */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when the connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_NOT_SPECIFIED);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns the maximum number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select a safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec.
	 */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
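
/* Rough illustration, assuming the documented sysctl defaults
 * (tcp_base_mss = 1024, tcp_mtu_probe_floor = 48, tcp_min_snd_mss = 48):
 * when retransmissions keep failing, the MSS derived from search_low is
 * halved, clamped to at most tcp_base_mss and at least the two floors,
 * then converted back into an MTU for the next probe. A search_low near
 * 1500 would step down toward 1024 and below on successive timeouts,
 * which is the blackhole-detection shrinking described above.
 */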

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
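
/* Worked example of the model above (constants from include/net/tcp.h and
 * Documentation/networking/ip-sysctl.rst; the arithmetic is illustrative):
 * with rto_base = TCP_RTO_MIN (200 ms) and TCP_RTO_MAX = 120 s,
 * linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9.
 * A boundary of 15 (the default tcp_retries2) then models
 *   ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s
 * which is the hypothetical ~924.6 s timeout quoted in the sysctl docs.
 */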

/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated from
 *             TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off by up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}
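
/* Summary of the thresholds above, assuming the documented sysctl
 * defaults: a connection in SYN_SENT gives up after tcp_syn_retries (6)
 * retransmissions, about 127 s of exponential backoff per the ip-sysctl
 * docs; an established connection starts MTU blackhole probing and asks
 * for a route re-check after tcp_retries1 (3), and aborts after
 * tcp_retries2 (15), the hypothetical ~924.6 s budget modelled by
 * tcp_model_timeout(). A nonzero TCP_USER_TIMEOUT replaces the
 * time-based budget in either case.
 */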

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handle the SACK compression case. */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}
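
/* Note on the ATO handling above: when a delayed-ACK deadline is missed
 * outside pingpong mode, the ACK timeout (ato) is doubled, capped at the
 * current RTO; in pingpong mode we instead leave that mode and fall back
 * to TCP_ATO_MIN (HZ/25 jiffies, i.e. 40 ms). This is an illustrative
 * reading of the constants in include/net/tcp.h, not new behaviour.
 */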

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		     msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
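
/* Lifecycle sketch (a hypothetical trace, defaults assumed): the peer
 * advertises a zero window, so each fired probe0 timer sends one window
 * probe and bumps icsk_probes_out; any ACK from the peer resets the
 * counter, per the RFC 1122 rule cited above. With tcp_retries2 = 15 and
 * no TCP_USER_TIMEOUT, a live, non-orphaned connection therefore
 * survives indefinitely as long as the peer keeps ACKing the probes.
 */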

static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  req->timeout << req->num_timeout, TCP_RTO_MAX);
}
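
/* Timing sketch for the SYNACK retransmits above: the rearm uses
 * req->timeout << req->num_timeout, so with a 1 s initial request
 * timeout (TCP_TIMEOUT_INIT; an assumption about the caller, since
 * req->timeout may differ) the retries land at roughly 1 s, 2 s, 4 s,
 * ... capped by TCP_RTO_MAX, until num_timeout reaches max_retries.
 */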

static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const int timeout = TCP_RTO_MAX * 2;
	s32 rcv_delta;

	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* The receiver dastardly shrinks the window. Our retransmits
		 * become zero probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out; we
		 * cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&inet->inet_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&sk->sk_v6_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion;
		 * let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff'
	 * is used to reset the timer, set it to 0. Recalculate 'icsk_rto' as
	 * it might have been increased if the stream oscillates between thin
	 * and thick; the old value might then already be too high compared
	 * to the value set by 'tcp_set_rto' in tcp_input.c, which resets the
	 * rto without backoff. Limit to TCP_THIN_LINEAR_RETRIES before
	 * initiating exponential backoff behaviour, to avoid continuing to
	 * hammer linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   tp->total_rto >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}
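
/* Backoff illustration for the branch above (TCP_THIN_LINEAR_RETRIES is
 * 6 in include/net/tcp.h; the trace itself is hypothetical): a thin
 * established stream retries at a flat, freshly recalculated RTO for the
 * first 6 timeouts, e.g. roughly 6 x 200 ms with a minimal RTO, instead
 * of the exponential 200, 400, 800, ... ms series, and only then starts
 * doubling toward TCP_RTO_MAX.
 */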

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		return;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to time out instead.
		 */
		if ((user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_NOT_SPECIFIED);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* Rearm so the timer fires at tp->rcv_tstamp + keepalive_time_when(tp). */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
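
/* End-to-end keepalive arithmetic, assuming the documented sysctl
 * defaults (tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s,
 * tcp_keepalive_probes = 9): an idle connection whose peer has vanished
 * is first probed after 2 hours, then every 75 s; after 9 unanswered
 * probes it is reset, i.e. roughly 7200 + 9 * 75 = 7875 s after the
 * last data.
 */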

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we eventually have to send one ACK,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}
870