// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/rstreason.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
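
/* Worked example (illustrative values, not taken from this file): with
 * TCP_USER_TIMEOUT set to 5000 ms and 4200 ms already elapsed since
 * retrans_stamp, remaining = 800 ms, so an icsk_rto worth 1600 ms is
 * clamped down to msecs_to_jiffies(800); the next RTO expiry can never
 * overshoot the user-imposed deadline.
 */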

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout;
	s32 remaining;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(int, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}
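
/* Same idea for zero-window probes, with illustrative numbers: a probe
 * interval 'when' of 30 s is cut short when only, say, 2 s of the user
 * timeout budget remain since icsk_probes_tstamp. Flooring at
 * TCP_TIMEOUT_MIN keeps the result positive, so the timer is always
 * re-armed a sane distance in the future even once the budget is spent.
 */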

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are not yet confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*tcp_rto_max(sk) || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_ABORT_ON_MEMORY);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
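
/* Sanity check of the ">100 seconds" claim above, assuming a 200 ms
 * minimum RTO and plain exponential backoff: the timer for retransmit
 * N fires after 200 ms * (2^N - 1) of cumulative waiting, so the 9th
 * expiry (i.e. after 8 failed retransmits) lands at
 * 200 ms * (2^9 - 1) = 102.2 s.
 */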

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
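
/* Illustrative halving step (the numbers are assumptions; sysctl
 * defaults vary by config): if search_low currently maps to an MSS of
 * 1200, the next probe target is 600, min()'d against tcp_base_mss
 * (1024 by default) and floored by tcp_mtu_probe_floor and
 * tcp_min_snd_mss (both 48 by default). Repeated timeouts therefore
 * walk the PMTU search window down geometrically rather than linearly.
 */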

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(tcp_rto_max(sk) / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * tcp_rto_max(sk);
	return jiffies_to_msecs(timeout);
}
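
/* Worked example with the usual defaults (TCP_RTO_MIN = 200 ms,
 * rto_max = 120 s): linear_backoff_thresh = ilog2(120000 / 200) = 9.
 * For boundary = 15, the default tcp_retries2, this gives
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s
 * = 924.6 s, i.e. roughly 15.4 minutes of backed-off retransmissions
 * before the connection is declared dead.
 */
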
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0 the default timeout is calculated and used.
 *             Using TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off by up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < tcp_rto_max(sk);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);
	mptcp_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		WRITE_ONCE(tp->timeout_rehash, tp->timeout_rehash + 1);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handle the SACK compression case. */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk_delack_timeout(icsk), jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk_delack_timeout(icsk));
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}


/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	/* Avoid taking socket spinlock if there is no ACK to send.
	 * The compressed_ack check is racy, but a separate hrtimer
	 * will take care of it eventually.
	 */
	if (!(smp_load_acquire(&icsk->icsk_ack.pending) & ICSK_ACK_TIMER) &&
	    !READ_ONCE(tcp_sk(sk)->compressed_ack))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}
388
tcp_probe_timer(struct sock * sk)389 static void tcp_probe_timer(struct sock *sk)
390 {
391 struct inet_connection_sock *icsk = inet_csk(sk);
392 struct sk_buff *skb = tcp_send_head(sk);
393 struct tcp_sock *tp = tcp_sk(sk);
394 int max_probes;
395
396 if (tp->packets_out || !skb) {
397 WRITE_ONCE(icsk->icsk_probes_out, 0);
398 icsk->icsk_probes_tstamp = 0;
399 return;
400 }
401
402 /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
403 * long as the receiver continues to respond probes. We support this by
404 * default and reset icsk_probes_out with incoming ACKs. But if the
405 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
406 * kill the socket when the retry count and the time exceeds the
407 * corresponding system limit. We also implement similar policy when
408 * we use RTO to probe window in tcp_retransmit_timer().
409 */
410 if (!icsk->icsk_probes_tstamp) {
411 icsk->icsk_probes_tstamp = tcp_jiffies32;
412 } else {
413 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
414
415 if (user_timeout &&
416 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
417 msecs_to_jiffies(user_timeout))
418 goto abort;
419 }
420 max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
421 if (sock_flag(sk, SOCK_DEAD)) {
422 unsigned int rto_max = tcp_rto_max(sk);
423 const bool alive = inet_csk_rto_backoff(icsk, rto_max) < rto_max;
424
425 max_probes = tcp_orphan_retries(sk, alive);
426 if (!alive && icsk->icsk_backoff >= max_probes)
427 goto abort;
428 if (tcp_out_of_resources(sk, true))
429 return;
430 }
431
432 if (icsk->icsk_probes_out >= max_probes) {
433 abort: tcp_write_err(sk);
434 } else {
435 /* Only send another probe if we didn't close things up. */
436 tcp_send_probe0(sk);
437 }
438 }

static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	WRITE_ONCE(icsk->icsk_retransmits, icsk->icsk_retransmits + 1);
	tp->total_rto++;
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	tcp_syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	tcp_rtx_synack(sk, req);
	if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
		tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     req->timeout << req->num_timeout, false);
}

static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = tcp_rto_max(sk) * 2;
	s32 rcv_delta;

	if (user_timeout) {
		/* If user application specified a TCP_USER_TIMEOUT,
		 * it does not want win 0 packets to 'reset the timer'
		 * while retransmits are not making progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}
	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = tcp_timeout_expires(sk) - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}
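
/* Illustrative budget (assuming the default 120 s rto_max): without
 * TCP_USER_TIMEOUT, a zero-window connection gets 2 * 120 s = 240 s of
 * stalled retransmits before being declared dead. With
 * TCP_USER_TIMEOUT = 30000 ms the check above trips as soon as
 * rtx_delta exceeds 30 s, no matter how diligently the peer keeps
 * acking our window probes.
 */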

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&inet->inet_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&sk->sk_v6_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * Let senders fight for local resources conservatively.
		 */
		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				     TCP_RESOURCE_PROBE_INTERVAL,
				     false);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
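
	/* Concrete backoff trace (assuming icsk_rto starts at 200 ms and
	 * the default 120 s cap): 200 ms, 400 ms, 800 ms, ... reaching
	 * 102.4 s after nine doublings, then pinned at 120 s from the
	 * tenth retransmit onward by the min() against tcp_rto_max().
	 */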

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       tcp_rto_max(sk));
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   tp->total_rto >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, tcp_rto_max(sk));
	}
	tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			     tcp_clamp_rto_to_user_timeout(sk), false);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer() and tcp_release_cb().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(tcp_timeout_expires(sk), jiffies)) {
		sk_reset_timer(sk, &sk->tcp_retransmit_timer,
			       tcp_timeout_expires(sk));
		return;
	}
	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		smp_store_release(&icsk->icsk_pending, 0);
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct sock *sk = timer_container_of(sk, t, tcp_retransmit_timer);

	/* Avoid locking the socket when there is no pending event. */
	if (!smp_load_acquire(&inet_csk(sk)->icsk_pending))
		goto out;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
out:
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}

void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &inet_csk(sk)->icsk_keepalive_timer, jiffies + len);
}

static void tcp_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &inet_csk(sk)->icsk_keepalive_timer);
}

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		tcp_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			timer_container_of(icsk, t, icsk_keepalive_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		tcp_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_TCP_STATE);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		     elapsed >= msecs_to_jiffies(user_timeout) &&
		     icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC,
					      SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1);
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}
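
	/* End-to-end arithmetic with the textbook defaults (assumptions:
	 * tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s,
	 * tcp_keepalive_probes = 9): a silent peer is first probed after
	 * two hours, then every 75 s; the reset above fires once the 9th
	 * probe goes unanswered, i.e. after roughly 7200 + 9 * 75 = 7875 s
	 * of total silence.
	 */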

resched:
	tcp_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_mstamp_refresh(tp);
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_setup(&tcp_sk(sk)->pacing_timer, tcp_pace_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_ABS_PINNED_SOFT);

	hrtimer_setup(&tcp_sk(sk)->compressed_ack_timer, tcp_compressed_ack_kick, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL_PINNED_SOFT);
}