1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Mark Evans, <evansmp@uhura.aston.ac.uk>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
15 * Linus Torvalds, <torvalds@cs.helsinki.fi>
16 * Alan Cox, <gw4pts@gw4pts.ampr.org>
17 * Matthew Dillon, <dillon@apollo.west.oic.com>
18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
19 * Jorge Cwik, <jorge@laser.satlink.net>
20 */
21
22 /*
23 * Changes:
24 * Pedro Roque : Fast Retransmit/Recovery.
25 * Two receive queues.
26 * Retransmit queue handled by TCP.
27 * Better retransmit timer handling.
28 * New congestion avoidance.
29 * Header prediction.
30 * Variable renaming.
31 *
32 * Eric : Fast Retransmit.
33 * Randy Scott : MSS option defines.
34 * Eric Schenk : Fixes to slow start algorithm.
35 * Eric Schenk : Yet another double ACK bug.
36 * Eric Schenk : Delayed ACK bug fixes.
37 * Eric Schenk : Floyd style fast retrans war avoidance.
38 * David S. Miller : Don't allow zero congestion window.
39 * Eric Schenk : Fix retransmitter so that it sends
40 * next packet on ack of previous packet.
41 * Andi Kleen : Moved open_request checking here
42 * and process RSTs for open_requests.
43 * Andi Kleen : Better prune_queue, and other fixes.
44 * Andrey Savochkin: Fix RTT measurements in the presence of
45 * timestamps.
46 * Andrey Savochkin: Check sequence numbers correctly when
47 * removing SACKs due to in sequence incoming
48 * data segments.
49 * Andi Kleen: Make sure we never ack data there is not
50 * enough room for. Also make this condition
51 * a fatal error if it might still happen.
52 * Andi Kleen: Add tcp_measure_rcv_mss to make
53 * connections with MSS<min(MTU,ann. MSS)
54 * work without delayed acks.
55 * Andi Kleen: Process packets with PSH set in the
56 * fast path.
57 * J Hadi Salim: ECN support
58 * Andrei Gurtov,
59 * Pasi Sarolahti,
60 * Panu Kuhlberg: Experimental audit of TCP (re)transmission
61 * engine. Lots of bugs are found.
62 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs
63 */
64
65 #define pr_fmt(fmt) "TCP: " fmt
66
67 #include <linux/mm.h>
68 #include <linux/slab.h>
69 #include <linux/module.h>
70 #include <linux/sysctl.h>
71 #include <linux/kernel.h>
72 #include <linux/prefetch.h>
73 #include <linux/bitops.h>
74 #include <net/dst.h>
75 #include <net/tcp.h>
76 #include <net/tcp_ecn.h>
77 #include <net/proto_memory.h>
78 #include <net/inet_common.h>
79 #include <linux/ipsec.h>
80 #include <linux/unaligned.h>
81 #include <linux/errqueue.h>
82 #include <trace/events/tcp.h>
83 #include <linux/jump_label_ratelimit.h>
84 #include <net/busy_poll.h>
85 #include <net/mptcp.h>
86
87 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
88
89 #define FLAG_DATA 0x01 /* Incoming frame contained data. */
90 #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
91 #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
92 #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
93 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
94 #define FLAG_DATA_SACKED 0x20 /* New SACK. */
95 #define FLAG_ECE 0x40 /* ECE in this ACK */
96 #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */
97 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
98 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
99 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
100 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
101 #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
102 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
103 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
104 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
105 #define FLAG_ACK_MAYBE_DELAYED 0x10000 /* Likely a delayed ACK */
106 #define FLAG_DSACK_TLP 0x20000 /* DSACK for tail loss probe */
107 #define FLAG_TS_PROGRESS 0x40000 /* Positive timestamp delta */
108
109 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
110 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
111 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
112 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
113
114 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
115 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
116
117 #define REXMIT_NONE 0 /* no loss recovery to do */
118 #define REXMIT_LOST 1 /* retransmit packets marked lost */
119 #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
120
121 #if IS_ENABLED(CONFIG_TLS_DEVICE)
122 static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);
123
clean_acked_data_enable(struct tcp_sock * tp,void (* cad)(struct sock * sk,u32 ack_seq))124 void clean_acked_data_enable(struct tcp_sock *tp,
125 void (*cad)(struct sock *sk, u32 ack_seq))
126 {
127 tp->tcp_clean_acked = cad;
128 static_branch_deferred_inc(&clean_acked_data_enabled);
129 }
130 EXPORT_SYMBOL_GPL(clean_acked_data_enable);
131
clean_acked_data_disable(struct tcp_sock * tp)132 void clean_acked_data_disable(struct tcp_sock *tp)
133 {
134 static_branch_slow_dec_deferred(&clean_acked_data_enabled);
135 tp->tcp_clean_acked = NULL;
136 }
137 EXPORT_SYMBOL_GPL(clean_acked_data_disable);
138
clean_acked_data_flush(void)139 void clean_acked_data_flush(void)
140 {
141 static_key_deferred_flush(&clean_acked_data_enabled);
142 }
143 EXPORT_SYMBOL_GPL(clean_acked_data_flush);
144 #endif
145
146 #ifdef CONFIG_CGROUP_BPF
bpf_skops_parse_hdr(struct sock * sk,struct sk_buff * skb)147 static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
148 {
149 bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
150 BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
151 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
152 bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
153 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
154 struct bpf_sock_ops_kern sock_ops;
155
156 if (likely(!unknown_opt && !parse_all_opt))
157 return;
158
159 /* The skb will be handled in the
160 * bpf_skops_established() or
161 * bpf_skops_write_hdr_opt().
162 */
163 switch (sk->sk_state) {
164 case TCP_SYN_RECV:
165 case TCP_SYN_SENT:
166 case TCP_LISTEN:
167 return;
168 }
169
170 sock_owned_by_me(sk);
171
172 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
173 sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
174 sock_ops.is_fullsock = 1;
175 sock_ops.is_locked_tcp_sock = 1;
176 sock_ops.sk = sk;
177 bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
178
179 BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
180 }
181
bpf_skops_established(struct sock * sk,int bpf_op,struct sk_buff * skb)182 static void bpf_skops_established(struct sock *sk, int bpf_op,
183 struct sk_buff *skb)
184 {
185 struct bpf_sock_ops_kern sock_ops;
186
187 sock_owned_by_me(sk);
188
189 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
190 sock_ops.op = bpf_op;
191 sock_ops.is_fullsock = 1;
192 sock_ops.is_locked_tcp_sock = 1;
193 sock_ops.sk = sk;
194 /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
195 if (skb)
196 bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
197
198 BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
199 }
200 #else
bpf_skops_parse_hdr(struct sock * sk,struct sk_buff * skb)201 static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
202 {
203 }
204
bpf_skops_established(struct sock * sk,int bpf_op,struct sk_buff * skb)205 static void bpf_skops_established(struct sock *sk, int bpf_op,
206 struct sk_buff *skb)
207 {
208 }
209 #endif
210
tcp_gro_dev_warn(const struct sock * sk,const struct sk_buff * skb,unsigned int len)211 static __cold void tcp_gro_dev_warn(const struct sock *sk, const struct sk_buff *skb,
212 unsigned int len)
213 {
214 struct net_device *dev;
215
216 rcu_read_lock();
217 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
218 if (!dev || len >= READ_ONCE(dev->mtu))
219 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
220 dev ? dev->name : "Unknown driver");
221 rcu_read_unlock();
222 }
223
224 /* Adapt the MSS value used to make delayed ack decision to the
225 * real world.
226 */
tcp_measure_rcv_mss(struct sock * sk,const struct sk_buff * skb)227 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
228 {
229 struct inet_connection_sock *icsk = inet_csk(sk);
230 const unsigned int lss = icsk->icsk_ack.last_seg_size;
231 unsigned int len;
232
233 icsk->icsk_ack.last_seg_size = 0;
234
235 /* skb->len may jitter because of SACKs, even if peer
236 * sends good full-sized frames.
237 */
238 len = skb_shinfo(skb)->gso_size ? : skb->len;
239 if (len >= icsk->icsk_ack.rcv_mss) {
240 /* Note: divides are still a bit expensive.
241 * For the moment, only adjust scaling_ratio
242 * when we update icsk_ack.rcv_mss.
243 */
244 if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
245 u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
246 u8 old_ratio = tcp_sk(sk)->scaling_ratio;
247
248 do_div(val, skb->truesize);
249 tcp_sk(sk)->scaling_ratio = val ? val : 1;
250
251 if (old_ratio != tcp_sk(sk)->scaling_ratio) {
252 struct tcp_sock *tp = tcp_sk(sk);
253
254 val = tcp_win_from_space(sk, sk->sk_rcvbuf);
255 tcp_set_window_clamp(sk, val);
256
257 if (tp->window_clamp < tp->rcvq_space.space)
258 tp->rcvq_space.space = tp->window_clamp;
259 }
260 }
261 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
262 tcp_sk(sk)->advmss);
263 /* Account for possibly-removed options */
264 DO_ONCE_LITE_IF(len > icsk->icsk_ack.rcv_mss + MAX_TCP_OPTION_SPACE,
265 tcp_gro_dev_warn, sk, skb, len);
266 /* If the skb has a len of exactly 1*MSS and has the PSH bit
267 * set then it is likely the end of an application write. So
268 * more data may not be arriving soon, and yet the data sender
269 * may be waiting for an ACK if cwnd-bound or using TX zero
270 * copy. So we set ICSK_ACK_PUSHED here so that
271 * tcp_cleanup_rbuf() will send an ACK immediately if the app
272 * reads all of the data and is not ping-pong. If len > MSS
273 * then this logic does not matter (and does not hurt) because
274 * tcp_cleanup_rbuf() will always ACK immediately if the app
275 * reads data and there is more than an MSS of unACKed data.
276 */
277 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
278 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
279 } else {
280 /* Otherwise, we make more careful check taking into account,
281 * that SACKs block is variable.
282 *
283 * "len" is invariant segment length, including TCP header.
284 */
285 len += skb->data - skb_transport_header(skb);
286 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
287 /* If PSH is not set, packet should be
288 * full sized, provided peer TCP is not badly broken.
289 * This observation (if it is correct 8)) allows
290 * to handle super-low mtu links fairly.
291 */
292 (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
293 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
294 /* Subtract also invariant (if peer is RFC compliant),
295 * tcp header plus fixed timestamp option length.
296 * Resulting "len" is MSS free of SACK jitter.
297 */
298 len -= tcp_sk(sk)->tcp_header_len;
299 icsk->icsk_ack.last_seg_size = len;
300 if (len == lss) {
301 icsk->icsk_ack.rcv_mss = len;
302 return;
303 }
304 }
305 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
306 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
307 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
308 }
309 }
310
tcp_incr_quickack(struct sock * sk,unsigned int max_quickacks)311 static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
312 {
313 struct inet_connection_sock *icsk = inet_csk(sk);
314 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
315
316 if (quickacks == 0)
317 quickacks = 2;
318 quickacks = min(quickacks, max_quickacks);
319 if (quickacks > icsk->icsk_ack.quick)
320 icsk->icsk_ack.quick = quickacks;
321 }
322
tcp_enter_quickack_mode(struct sock * sk,unsigned int max_quickacks)323 static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
324 {
325 struct inet_connection_sock *icsk = inet_csk(sk);
326
327 tcp_incr_quickack(sk, max_quickacks);
328 inet_csk_exit_pingpong_mode(sk);
329 icsk->icsk_ack.ato = TCP_ATO_MIN;
330 }
331
332 /* Send ACKs quickly, if "quick" count is not exhausted
333 * and the session is not interactive.
334 */
335
tcp_in_quickack_mode(struct sock * sk)336 static bool tcp_in_quickack_mode(struct sock *sk)
337 {
338 const struct inet_connection_sock *icsk = inet_csk(sk);
339
340 return icsk->icsk_ack.dst_quick_ack ||
341 (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
342 }
343
tcp_data_ecn_check(struct sock * sk,const struct sk_buff * skb)344 static void tcp_data_ecn_check(struct sock *sk, const struct sk_buff *skb)
345 {
346 struct tcp_sock *tp = tcp_sk(sk);
347
348 if (tcp_ecn_disabled(tp))
349 return;
350
351 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
352 case INET_ECN_NOT_ECT:
353 /* Funny extension: if ECT is not set on a segment,
354 * and we already seen ECT on a previous segment,
355 * it is probably a retransmit.
356 */
357 if (tp->ecn_flags & TCP_ECN_SEEN)
358 tcp_enter_quickack_mode(sk, 2);
359 break;
360 case INET_ECN_CE:
361 if (tcp_ca_needs_ecn(sk))
362 tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
363
364 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR) &&
365 tcp_ecn_mode_rfc3168(tp)) {
366 /* Better not delay acks, sender can have a very low cwnd */
367 tcp_enter_quickack_mode(sk, 2);
368 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
369 }
370 /* As for RFC3168 ECN, the TCP_ECN_SEEN flag is set by
371 * tcp_data_ecn_check() when the ECN codepoint of
372 * received TCP data contains ECT(0), ECT(1), or CE.
373 */
374 if (!tcp_ecn_mode_rfc3168(tp))
375 break;
376 tp->ecn_flags |= TCP_ECN_SEEN;
377 break;
378 default:
379 if (tcp_ca_needs_ecn(sk))
380 tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
381 if (!tcp_ecn_mode_rfc3168(tp))
382 break;
383 tp->ecn_flags |= TCP_ECN_SEEN;
384 break;
385 }
386 }
387
388 /* Returns true if the byte counters can be used */
tcp_accecn_process_option(struct tcp_sock * tp,const struct sk_buff * skb,u32 delivered_bytes,int flag)389 static bool tcp_accecn_process_option(struct tcp_sock *tp,
390 const struct sk_buff *skb,
391 u32 delivered_bytes, int flag)
392 {
393 u8 estimate_ecnfield = tp->est_ecnfield;
394 bool ambiguous_ecn_bytes_incr = false;
395 bool first_changed = false;
396 unsigned int optlen;
397 bool order1, res;
398 unsigned int i;
399 u8 *ptr;
400
401 if (tcp_accecn_opt_fail_recv(tp))
402 return false;
403
404 if (!(flag & FLAG_SLOWPATH) || !tp->rx_opt.accecn) {
405 if (!tp->saw_accecn_opt) {
406 /* Too late to enable after this point due to
407 * potential counter wraps
408 */
409 if (tp->bytes_sent >= (1 << 23) - 1) {
410 u8 saw_opt = TCP_ACCECN_OPT_FAIL_SEEN;
411
412 tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
413 }
414 return false;
415 }
416
417 if (estimate_ecnfield) {
418 u8 ecnfield = estimate_ecnfield - 1;
419
420 tp->delivered_ecn_bytes[ecnfield] += delivered_bytes;
421 return true;
422 }
423 return false;
424 }
425
426 ptr = skb_transport_header(skb) + tp->rx_opt.accecn;
427 optlen = ptr[1] - 2;
428 if (WARN_ON_ONCE(ptr[0] != TCPOPT_ACCECN0 && ptr[0] != TCPOPT_ACCECN1))
429 return false;
430 order1 = (ptr[0] == TCPOPT_ACCECN1);
431 ptr += 2;
432
433 if (tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
434 tp->saw_accecn_opt = tcp_accecn_option_init(skb,
435 tp->rx_opt.accecn);
436 if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN)
437 tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV);
438 }
439
440 res = !!estimate_ecnfield;
441 for (i = 0; i < 3; i++) {
442 u32 init_offset;
443 u8 ecnfield;
444 s32 delta;
445 u32 *cnt;
446
447 if (optlen < TCPOLEN_ACCECN_PERFIELD)
448 break;
449
450 ecnfield = tcp_accecn_optfield_to_ecnfield(i, order1);
451 init_offset = tcp_accecn_field_init_offset(ecnfield);
452 cnt = &tp->delivered_ecn_bytes[ecnfield - 1];
453 delta = tcp_update_ecn_bytes(cnt, ptr, init_offset);
454 if (delta && delta < 0) {
455 res = false;
456 ambiguous_ecn_bytes_incr = true;
457 }
458 if (delta && ecnfield != estimate_ecnfield) {
459 if (!first_changed) {
460 tp->est_ecnfield = ecnfield;
461 first_changed = true;
462 } else {
463 res = false;
464 ambiguous_ecn_bytes_incr = true;
465 }
466 }
467
468 optlen -= TCPOLEN_ACCECN_PERFIELD;
469 ptr += TCPOLEN_ACCECN_PERFIELD;
470 }
471 if (ambiguous_ecn_bytes_incr)
472 tp->est_ecnfield = 0;
473
474 return res;
475 }
476
tcp_count_delivered_ce(struct tcp_sock * tp,u32 ecn_count)477 static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
478 {
479 WRITE_ONCE(tp->delivered_ce, tp->delivered_ce + ecn_count);
480 }
481
482 /* Updates the delivered and delivered_ce counts */
tcp_count_delivered(struct tcp_sock * tp,u32 delivered,bool ece_ack)483 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
484 bool ece_ack)
485 {
486 WRITE_ONCE(tp->delivered, tp->delivered + delivered);
487 if (tcp_ecn_mode_rfc3168(tp) && ece_ack)
488 tcp_count_delivered_ce(tp, delivered);
489 }
490
491 #define PKTS_ACKED_WEIGHT 6
492 #define PKTS_ACKED_PREC 6
493 #define ACK_COMP_THRESH 4
494
495 /* Returns the ECN CE delta */
__tcp_accecn_process(struct sock * sk,const struct sk_buff * skb,u32 delivered_pkts,u32 delivered_bytes,int flag)496 static u32 __tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
497 u32 delivered_pkts, u32 delivered_bytes,
498 int flag)
499 {
500 u32 old_ceb = tcp_sk(sk)->delivered_ecn_bytes[INET_ECN_CE - 1];
501 const struct tcphdr *th = tcp_hdr(skb);
502 struct tcp_sock *tp = tcp_sk(sk);
503 u32 delta, safe_delta, d_ceb;
504 bool opt_deltas_valid;
505 u32 corrected_ace;
506 u32 ewma;
507
508 /* Reordered ACK or uncertain due to lack of data to send and ts */
509 if (!(flag & (FLAG_FORWARD_PROGRESS | FLAG_TS_PROGRESS)))
510 return 0;
511
512 opt_deltas_valid = tcp_accecn_process_option(tp, skb,
513 delivered_bytes, flag);
514
515 if (delivered_pkts) {
516 if (!tp->pkts_acked_ewma) {
517 ewma = delivered_pkts << PKTS_ACKED_PREC;
518 } else {
519 ewma = tp->pkts_acked_ewma;
520 ewma = (((ewma << PKTS_ACKED_WEIGHT) - ewma) +
521 (delivered_pkts << PKTS_ACKED_PREC)) >>
522 PKTS_ACKED_WEIGHT;
523 }
524 tp->pkts_acked_ewma = min_t(u32, ewma, 0xFFFFU);
525 }
526
527 if (!(flag & FLAG_SLOWPATH)) {
528 /* AccECN counter might overflow on large ACKs */
529 if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
530 return 0;
531 }
532
533 /* ACE field is not available during handshake */
534 if (flag & FLAG_SYN_ACKED)
535 return 0;
536
537 if (tp->received_ce_pending >= TCP_ACCECN_ACE_MAX_DELTA)
538 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
539
540 corrected_ace = tcp_accecn_ace(th) - TCP_ACCECN_CEP_INIT_OFFSET;
541 delta = (corrected_ace - tp->delivered_ce) & TCP_ACCECN_CEP_ACE_MASK;
542 if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
543 return delta;
544
545 safe_delta = delivered_pkts -
546 ((delivered_pkts - delta) & TCP_ACCECN_CEP_ACE_MASK);
547
548 if (opt_deltas_valid) {
549 d_ceb = tp->delivered_ecn_bytes[INET_ECN_CE - 1] - old_ceb;
550 if (!d_ceb)
551 return delta;
552
553 if ((delivered_pkts >= (TCP_ACCECN_CEP_ACE_MASK + 1) * 2) &&
554 (tcp_is_sack(tp) ||
555 ((1 << inet_csk(sk)->icsk_ca_state) &
556 (TCPF_CA_Open | TCPF_CA_CWR)))) {
557 u32 est_d_cep;
558
559 if (delivered_bytes <= d_ceb)
560 return safe_delta;
561
562 est_d_cep = DIV_ROUND_UP_ULL((u64)d_ceb *
563 delivered_pkts,
564 delivered_bytes);
565 return min(safe_delta,
566 delta +
567 (est_d_cep & ~TCP_ACCECN_CEP_ACE_MASK));
568 }
569
570 if (d_ceb > delta * tp->mss_cache)
571 return safe_delta;
572 if (d_ceb <
573 safe_delta * tp->mss_cache >> TCP_ACCECN_SAFETY_SHIFT)
574 return delta;
575 } else if (tp->pkts_acked_ewma > (ACK_COMP_THRESH << PKTS_ACKED_PREC))
576 return delta;
577
578 return safe_delta;
579 }
580
tcp_accecn_process(struct sock * sk,const struct sk_buff * skb,u32 delivered_pkts,u32 delivered_bytes,int * flag)581 static u32 tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
582 u32 delivered_pkts, u32 delivered_bytes,
583 int *flag)
584 {
585 struct tcp_sock *tp = tcp_sk(sk);
586 u32 delta;
587
588 delta = __tcp_accecn_process(sk, skb, delivered_pkts,
589 delivered_bytes, *flag);
590 if (delta > 0) {
591 tcp_count_delivered_ce(tp, delta);
592 *flag |= FLAG_ECE;
593 /* Recalculate header predictor */
594 if (tp->pred_flags)
595 tcp_fast_path_on(tp);
596 }
597 return delta;
598 }
599
600 /* Buffer size and advertised window tuning.
601 *
602 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
603 */
604
tcp_sndbuf_expand(struct sock * sk)605 static void tcp_sndbuf_expand(struct sock *sk)
606 {
607 const struct tcp_sock *tp = tcp_sk(sk);
608 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
609 int sndmem, per_mss;
610 u32 nr_segs;
611
612 /* Worst case is non GSO/TSO : each frame consumes one skb
613 * and skb->head is kmalloced using power of two area of memory
614 */
615 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
616 MAX_TCP_HEADER +
617 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
618
619 per_mss = roundup_pow_of_two(per_mss) +
620 SKB_DATA_ALIGN(sizeof(struct sk_buff));
621
622 nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
623 nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
624
625 /* Fast Recovery (RFC 5681 3.2) :
626 * Cubic needs 1.7 factor, rounded to 2 to include
627 * extra cushion (application might react slowly to EPOLLOUT)
628 */
629 sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
630 sndmem *= nr_segs * per_mss;
631
632 if (sk->sk_sndbuf < sndmem)
633 WRITE_ONCE(sk->sk_sndbuf,
634 min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
635 }
636
637 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
638 *
639 * All tcp_full_space() is split to two parts: "network" buffer, allocated
640 * forward and advertised in receiver window (tp->rcv_wnd) and
641 * "application buffer", required to isolate scheduling/application
642 * latencies from network.
643 * window_clamp is maximal advertised window. It can be less than
644 * tcp_full_space(), in this case tcp_full_space() - window_clamp
645 * is reserved for "application" buffer. The less window_clamp is
646 * the smoother our behaviour from viewpoint of network, but the lower
647 * throughput and the higher sensitivity of the connection to losses. 8)
648 *
649 * rcv_ssthresh is more strict window_clamp used at "slow start"
650 * phase to predict further behaviour of this connection.
651 * It is used for two goals:
652 * - to enforce header prediction at sender, even when application
653 * requires some significant "application buffer". It is check #1.
654 * - to prevent pruning of receive queue because of misprediction
655 * of receiver window. Check #2.
656 *
657 * The scheme does not work when sender sends good segments opening
658 * window and then starts to feed us spaghetti. But it should work
659 * in common situations. Otherwise, we have to rely on queue collapsing.
660 */
661
662 /* Slow part of check#2. */
__tcp_grow_window(const struct sock * sk,const struct sk_buff * skb,unsigned int skbtruesize)663 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
664 unsigned int skbtruesize)
665 {
666 const struct tcp_sock *tp = tcp_sk(sk);
667 /* Optimize this! */
668 int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
669 int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
670
671 while (tp->rcv_ssthresh <= window) {
672 if (truesize <= skb->len)
673 return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
674
675 truesize >>= 1;
676 window >>= 1;
677 }
678 return 0;
679 }
680
681 /* Even if skb appears to have a bad len/truesize ratio, TCP coalescing
682 * can play nice with us, as sk_buff and skb->head might be either
683 * freed or shared with up to MAX_SKB_FRAGS segments.
684 * Only give a boost to drivers using page frag(s) to hold the frame(s),
685 * and if no payload was pulled in skb->head before reaching us.
686 */
truesize_adjust(bool adjust,const struct sk_buff * skb)687 static u32 truesize_adjust(bool adjust, const struct sk_buff *skb)
688 {
689 u32 truesize = skb->truesize;
690
691 if (adjust && !skb_headlen(skb)) {
692 truesize -= SKB_TRUESIZE(skb_end_offset(skb));
693 /* paranoid check, some drivers might be buggy */
694 if (unlikely((int)truesize < (int)skb->len))
695 truesize = skb->truesize;
696 }
697 return truesize;
698 }
699
tcp_grow_window(struct sock * sk,const struct sk_buff * skb,bool adjust)700 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
701 bool adjust)
702 {
703 struct tcp_sock *tp = tcp_sk(sk);
704 int room;
705
706 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
707
708 if (room <= 0)
709 return;
710
711 /* Check #1 */
712 if (!tcp_under_memory_pressure(sk)) {
713 unsigned int truesize = truesize_adjust(adjust, skb);
714 int incr;
715
716 /* Check #2. Increase window, if skb with such overhead
717 * will fit to rcvbuf in future.
718 */
719 if (tcp_win_from_space(sk, truesize) <= skb->len)
720 incr = 2 * tp->advmss;
721 else
722 incr = __tcp_grow_window(sk, skb, truesize);
723
724 if (incr) {
725 incr = max_t(int, incr, 2 * skb->len);
726 tp->rcv_ssthresh += min(room, incr);
727 inet_csk(sk)->icsk_ack.quick |= 1;
728 }
729 } else {
730 /* Under pressure:
731 * Adjust rcv_ssthresh according to reserved mem
732 */
733 tcp_adjust_rcv_ssthresh(sk);
734 }
735 }
736
737 /* 3. Try to fixup all. It is made immediately after connection enters
738 * established state.
739 */
tcp_init_buffer_space(struct sock * sk)740 static void tcp_init_buffer_space(struct sock *sk)
741 {
742 int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
743 struct tcp_sock *tp = tcp_sk(sk);
744 int maxwin;
745
746 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
747 tcp_sndbuf_expand(sk);
748
749 tcp_mstamp_refresh(tp);
750 tp->rcvq_space.time = tp->tcp_mstamp;
751 tp->rcvq_space.seq = tp->copied_seq;
752
753 maxwin = tcp_full_space(sk);
754
755 if (tp->window_clamp >= maxwin) {
756 WRITE_ONCE(tp->window_clamp, maxwin);
757
758 if (tcp_app_win && maxwin > 4 * tp->advmss)
759 WRITE_ONCE(tp->window_clamp,
760 max(maxwin - (maxwin >> tcp_app_win),
761 4 * tp->advmss));
762 }
763
764 /* Force reservation of one segment. */
765 if (tcp_app_win &&
766 tp->window_clamp > 2 * tp->advmss &&
767 tp->window_clamp + tp->advmss > maxwin)
768 WRITE_ONCE(tp->window_clamp,
769 max(2 * tp->advmss, maxwin - tp->advmss));
770
771 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
772 tp->snd_cwnd_stamp = tcp_jiffies32;
773 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
774 (u32)TCP_INIT_CWND * tp->advmss);
775 }
776
777 /* 4. Recalculate window clamp after socket hit its memory bounds. */
tcp_clamp_window(struct sock * sk)778 static void tcp_clamp_window(struct sock *sk)
779 {
780 struct tcp_sock *tp = tcp_sk(sk);
781 struct inet_connection_sock *icsk = inet_csk(sk);
782 struct net *net = sock_net(sk);
783 int rmem2;
784
785 icsk->icsk_ack.quick = 0;
786 rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
787
788 if (sk->sk_rcvbuf < rmem2 &&
789 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
790 !tcp_under_memory_pressure(sk) &&
791 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
792 WRITE_ONCE(sk->sk_rcvbuf,
793 min(atomic_read(&sk->sk_rmem_alloc), rmem2));
794 }
795 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
796 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
797 }
798
799 /* Initialize RCV_MSS value.
800 * RCV_MSS is an our guess about MSS used by the peer.
801 * We haven't any direct information about the MSS.
802 * It's better to underestimate the RCV_MSS rather than overestimate.
803 * Overestimations make us ACKing less frequently than needed.
804 * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
805 */
tcp_initialize_rcv_mss(struct sock * sk)806 void tcp_initialize_rcv_mss(struct sock *sk)
807 {
808 const struct tcp_sock *tp = tcp_sk(sk);
809 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
810
811 hint = min(hint, tp->rcv_wnd / 2);
812 hint = min(hint, TCP_MSS_DEFAULT);
813 hint = max(hint, TCP_MIN_MSS);
814
815 inet_csk(sk)->icsk_ack.rcv_mss = hint;
816 }
817
818 /* Receiver "autotuning" code.
819 *
820 * The algorithm for RTT estimation w/o timestamps is based on
821 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
822 * <https://public.lanl.gov/radiant/pubs.html#DRS>
823 *
824 * More detail on this code can be found at
825 * <http://staff.psc.edu/jheffner/>,
826 * though this reference is out of date. A new paper
827 * is pending.
828 */
tcp_rcv_rtt_update(struct tcp_sock * tp,u32 sample,int win_dep)829 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
830 {
831 u32 new_sample, old_sample = tp->rcv_rtt_est.rtt_us;
832 long m = sample << 3;
833
834 if (old_sample == 0 || m < old_sample) {
835 new_sample = m;
836 } else {
837 /* If we sample in larger samples in the non-timestamp
838 * case, we could grossly overestimate the RTT especially
839 * with chatty applications or bulk transfer apps which
840 * are stalled on filesystem I/O.
841 *
842 * Also, since we are only going for a minimum in the
843 * non-timestamp case, we do not smooth things out
844 * else with timestamps disabled convergence takes too
845 * long.
846 */
847 if (win_dep)
848 return;
849 /* Do not use this sample if receive queue is not empty. */
850 if (tp->rcv_nxt != tp->copied_seq)
851 return;
852 new_sample = old_sample - (old_sample >> 3) + sample;
853 }
854
855 tp->rcv_rtt_est.rtt_us = new_sample;
856 }
857
tcp_rcv_rtt_measure(struct tcp_sock * tp)858 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
859 {
860 u32 delta_us;
861
862 if (tp->rcv_rtt_est.time == 0)
863 goto new_measure;
864 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
865 return;
866 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
867 if (!delta_us)
868 delta_us = 1;
869 tcp_rcv_rtt_update(tp, delta_us, 1);
870
871 new_measure:
872 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
873 tp->rcv_rtt_est.time = tp->tcp_mstamp;
874 }
875
tcp_rtt_tsopt_us(const struct tcp_sock * tp,u32 min_delta)876 static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp, u32 min_delta)
877 {
878 u32 delta, delta_us;
879
880 delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr;
881 if (tp->tcp_usec_ts)
882 return delta;
883
884 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
885 if (!delta)
886 delta = min_delta;
887 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
888 return delta_us;
889 }
890 return -1;
891 }
892
tcp_rcv_rtt_measure_ts(struct sock * sk,const struct sk_buff * skb)893 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
894 const struct sk_buff *skb)
895 {
896 struct tcp_sock *tp = tcp_sk(sk);
897
898 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
899 return;
900 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
901
902 if (TCP_SKB_CB(skb)->end_seq -
903 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
904 s32 delta = tcp_rtt_tsopt_us(tp, 0);
905
906 if (delta > 0)
907 tcp_rcv_rtt_update(tp, delta, 0);
908 }
909 }
910
tcp_rcvbuf_grow(struct sock * sk,u32 newval)911 void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
912 {
913 const struct net *net = sock_net(sk);
914 struct tcp_sock *tp = tcp_sk(sk);
915 u32 rcvwin, rcvbuf, cap, oldval;
916 u32 rtt_threshold, rtt_us;
917 u64 grow;
918
919 oldval = tp->rcvq_space.space;
920 tp->rcvq_space.space = newval;
921
922 if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
923 (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
924 return;
925
926 /* DRS is always one RTT late. */
927 rcvwin = newval << 1;
928
929 rtt_us = tp->rcv_rtt_est.rtt_us >> 3;
930 rtt_threshold = READ_ONCE(net->ipv4.sysctl_tcp_rcvbuf_low_rtt);
931 if (rtt_us < rtt_threshold) {
932 /* For small RTT, we set @grow to rcvwin * rtt_us/rtt_threshold.
933 * It might take few additional ms to reach 'line rate',
934 * but will avoid sk_rcvbuf inflation and poor cache use.
935 */
936 grow = div_u64((u64)rcvwin * rtt_us, rtt_threshold);
937 } else {
938 /* slow start: allow the sender to double its rate. */
939 grow = div_u64(((u64)rcvwin << 1) * (newval - oldval), oldval);
940 }
941 rcvwin += grow;
942
943 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
944 rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
945
946 cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
947
948 rcvbuf = min_t(u32, tcp_space_from_win(sk, rcvwin), cap);
949 if (rcvbuf > sk->sk_rcvbuf) {
950 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
951 /* Make the window clamp follow along. */
952 WRITE_ONCE(tp->window_clamp,
953 tcp_win_from_space(sk, rcvbuf));
954 }
955 }
956 /*
957 * This function should be called every time data is copied to user space.
958 * It calculates the appropriate TCP receive buffer space.
959 */
tcp_rcv_space_adjust(struct sock * sk)960 void tcp_rcv_space_adjust(struct sock *sk)
961 {
962 struct tcp_sock *tp = tcp_sk(sk);
963 int time, inq, copied;
964
965 trace_tcp_rcv_space_adjust(sk);
966
967 if (unlikely(!tp->rcv_rtt_est.rtt_us))
968 return;
969
970 /* We do not refresh tp->tcp_mstamp here.
971 * Some platforms have expensive ktime_get() implementations.
972 * Using the last cached value is enough for DRS.
973 */
974 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
975 if (time < (tp->rcv_rtt_est.rtt_us >> 3))
976 return;
977
978 /* Number of bytes copied to user in last RTT */
979 copied = tp->copied_seq - tp->rcvq_space.seq;
980 /* Number of bytes in receive queue. */
981 inq = tp->rcv_nxt - tp->copied_seq;
982 copied -= inq;
983 if (copied <= tp->rcvq_space.space)
984 goto new_measure;
985
986 trace_tcp_rcvbuf_grow(sk, time);
987
988 tcp_rcvbuf_grow(sk, copied);
989
990 new_measure:
991 tp->rcvq_space.seq = tp->copied_seq;
992 tp->rcvq_space.time = tp->tcp_mstamp;
993 }
994
tcp_save_lrcv_flowlabel(struct sock * sk,const struct sk_buff * skb)995 static void tcp_save_lrcv_flowlabel(struct sock *sk, const struct sk_buff *skb)
996 {
997 #if IS_ENABLED(CONFIG_IPV6)
998 struct inet_connection_sock *icsk = inet_csk(sk);
999
1000 if (skb->protocol == htons(ETH_P_IPV6))
1001 icsk->icsk_ack.lrcv_flowlabel = ntohl(ip6_flowlabel(ipv6_hdr(skb)));
1002 #endif
1003 }
1004
1005 /* There is something which you must keep in mind when you analyze the
1006 * behavior of the tp->ato delayed ack timeout interval. When a
1007 * connection starts up, we want to ack as quickly as possible. The
1008 * problem is that "good" TCP's do slow start at the beginning of data
1009 * transmission. The means that until we send the first few ACK's the
1010 * sender will sit on his end and only queue most of his data, because
1011 * he can only send snd_cwnd unacked packets at any given time. For
1012 * each ACK we send, he increments snd_cwnd and transmits more of his
1013 * queue. -DaveM
1014 */
tcp_event_data_recv(struct sock * sk,struct sk_buff * skb)1015 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
1016 {
1017 struct tcp_sock *tp = tcp_sk(sk);
1018 struct inet_connection_sock *icsk = inet_csk(sk);
1019 u32 now;
1020
1021 inet_csk_schedule_ack(sk);
1022
1023 tcp_measure_rcv_mss(sk, skb);
1024
1025 tcp_rcv_rtt_measure(tp);
1026
1027 now = tcp_jiffies32;
1028
1029 if (!icsk->icsk_ack.ato) {
1030 /* The _first_ data packet received, initialize
1031 * delayed ACK engine.
1032 */
1033 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
1034 icsk->icsk_ack.ato = TCP_ATO_MIN;
1035 } else {
1036 int m = now - icsk->icsk_ack.lrcvtime;
1037
1038 if (m <= TCP_ATO_MIN / 2) {
1039 /* The fastest case is the first. */
1040 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
1041 } else if (m < icsk->icsk_ack.ato) {
1042 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
1043 if (icsk->icsk_ack.ato > icsk->icsk_rto)
1044 icsk->icsk_ack.ato = icsk->icsk_rto;
1045 } else if (m > icsk->icsk_rto) {
1046 /* Too long gap. Apparently sender failed to
1047 * restart window, so that we send ACKs quickly.
1048 */
1049 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
1050 }
1051 }
1052 icsk->icsk_ack.lrcvtime = now;
1053 tcp_save_lrcv_flowlabel(sk, skb);
1054
1055 tcp_data_ecn_check(sk, skb);
1056
1057 if (skb->len >= 128)
1058 tcp_grow_window(sk, skb, true);
1059 }
1060
1061 /* Called to compute a smoothed rtt estimate. The data fed to this
1062 * routine either comes from timestamps, or from segments that were
1063 * known _not_ to have been retransmitted [see Karn/Partridge
1064 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
1065 * piece by Van Jacobson.
1066 * NOTE: the next three routines used to be one big routine.
1067 * To save cycles in the RFC 1323 implementation it was better to break
1068 * it up into three procedures. -- erics
1069 */
tcp_rtt_estimator(struct sock * sk,long mrtt_us)1070 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
1071 {
1072 struct tcp_sock *tp = tcp_sk(sk);
1073 long m = mrtt_us; /* RTT */
1074 u32 srtt = tp->srtt_us;
1075
1076 /* The following amusing code comes from Jacobson's
1077 * article in SIGCOMM '88. Note that rtt and mdev
1078 * are scaled versions of rtt and mean deviation.
1079 * This is designed to be as fast as possible
1080 * m stands for "measurement".
1081 *
1082 * On a 1990 paper the rto value is changed to:
1083 * RTO = rtt + 4 * mdev
1084 *
1085 * Funny. This algorithm seems to be very broken.
1086 * These formulae increase RTO, when it should be decreased, increase
1087 * too slowly, when it should be increased quickly, decrease too quickly
1088 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
1089 * does not matter how to _calculate_ it. Seems, it was trap
1090 * that VJ failed to avoid. 8)
1091 */
1092 if (srtt != 0) {
1093 m -= (srtt >> 3); /* m is now error in rtt est */
1094 srtt += m; /* rtt = 7/8 rtt + 1/8 new */
1095 if (m < 0) {
1096 m = -m; /* m is now abs(error) */
1097 m -= (tp->mdev_us >> 2); /* similar update on mdev */
1098 /* This is similar to one of Eifel findings.
1099 * Eifel blocks mdev updates when rtt decreases.
1100 * This solution is a bit different: we use finer gain
1101 * for mdev in this case (alpha*beta).
1102 * Like Eifel it also prevents growth of rto,
1103 * but also it limits too fast rto decreases,
1104 * happening in pure Eifel.
1105 */
1106 if (m > 0)
1107 m >>= 3;
1108 } else {
1109 m -= (tp->mdev_us >> 2); /* similar update on mdev */
1110 }
1111 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
1112 if (tp->mdev_us > tp->mdev_max_us) {
1113 tp->mdev_max_us = tp->mdev_us;
1114 if (tp->mdev_max_us > tp->rttvar_us)
1115 tp->rttvar_us = tp->mdev_max_us;
1116 }
1117 if (after(tp->snd_una, tp->rtt_seq)) {
1118 if (tp->mdev_max_us < tp->rttvar_us)
1119 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
1120 tp->rtt_seq = tp->snd_nxt;
1121 tp->mdev_max_us = tcp_rto_min_us(sk);
1122
1123 tcp_bpf_rtt(sk, mrtt_us, srtt);
1124 }
1125 } else {
1126 /* no previous measure. */
1127 srtt = m << 3; /* take the measured time to be rtt */
1128 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */
1129 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
1130 tp->mdev_max_us = tp->rttvar_us;
1131 tp->rtt_seq = tp->snd_nxt;
1132
1133 tcp_bpf_rtt(sk, mrtt_us, srtt);
1134 }
1135 WRITE_ONCE(tp->srtt_us, max(1U, srtt));
1136 }
1137
tcp_update_pacing_rate(struct sock * sk)1138 void tcp_update_pacing_rate(struct sock *sk)
1139 {
1140 const struct tcp_sock *tp = tcp_sk(sk);
1141 u64 rate;
1142
1143 /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
1144 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
1145
1146 /* current rate is (cwnd * mss) / srtt
1147 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
1148 * In Congestion Avoidance phase, set it to 120 % the current rate.
1149 *
1150 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
1151 * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
1152 * end of slow start and should slow down.
1153 */
1154 if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
1155 rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
1156 else
1157 rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
1158
1159 rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
1160
1161 if (likely(tp->srtt_us))
1162 do_div(rate, tp->srtt_us);
1163
1164 /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
1165 * without any lock. We want to make sure compiler wont store
1166 * intermediate values in this location.
1167 */
1168 WRITE_ONCE(sk->sk_pacing_rate,
1169 min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate)));
1170 }
1171
1172 /* Calculate rto without backoff. This is the second half of Van Jacobson's
1173 * routine referred to above.
1174 */
tcp_set_rto(struct sock * sk)1175 void tcp_set_rto(struct sock *sk)
1176 {
1177 const struct tcp_sock *tp = tcp_sk(sk);
1178 /* Old crap is replaced with new one. 8)
1179 *
1180 * More seriously:
1181 * 1. If rtt variance happened to be less 50msec, it is hallucination.
1182 * It cannot be less due to utterly erratic ACK generation made
1183 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_
1184 * to do with delayed acks, because at cwnd>2 true delack timeout
1185 * is invisible. Actually, Linux-2.4 also generates erratic
1186 * ACKs in some circumstances.
1187 */
1188 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
1189
1190 /* 2. Fixups made earlier cannot be right.
1191 * If we do not estimate RTO correctly without them,
1192 * all the algo is pure shit and should be replaced
1193 * with correct one. It is exactly, which we pretend to do.
1194 */
1195
1196 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
1197 * guarantees that rto is higher.
1198 */
1199 tcp_bound_rto(sk);
1200 }
1201
tcp_init_cwnd(const struct tcp_sock * tp,const struct dst_entry * dst)1202 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
1203 {
1204 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
1205
1206 if (!cwnd)
1207 cwnd = TCP_INIT_CWND;
1208 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
1209 }
1210
1211 struct tcp_sacktag_state {
1212 /* Timestamps for earliest and latest never-retransmitted segment
1213 * that was SACKed. RTO needs the earliest RTT to stay conservative,
1214 * but congestion control should still get an accurate delay signal.
1215 */
1216 u64 first_sackt;
1217 u64 last_sackt;
1218 u32 reord;
1219 u32 sack_delivered;
1220 u32 delivered_bytes;
1221 int flag;
1222 unsigned int mss_now;
1223 struct rate_sample *rate;
1224 };
1225
1226 /* Take a notice that peer is sending D-SACKs. Skip update of data delivery
1227 * and spurious retransmission information if this DSACK is unlikely caused by
1228 * sender's action:
1229 * - DSACKed sequence range is larger than maximum receiver's window.
1230 * - Total no. of DSACKed segments exceed the total no. of retransmitted segs.
1231 */
tcp_dsack_seen(struct tcp_sock * tp,u32 start_seq,u32 end_seq,struct tcp_sacktag_state * state)1232 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
1233 u32 end_seq, struct tcp_sacktag_state *state)
1234 {
1235 u32 seq_len, dup_segs = 1;
1236
1237 if (!before(start_seq, end_seq))
1238 return 0;
1239
1240 seq_len = end_seq - start_seq;
1241 /* Dubious DSACK: DSACKed range greater than maximum advertised rwnd */
1242 if (seq_len > tp->max_window)
1243 return 0;
1244 if (seq_len > tp->mss_cache)
1245 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
1246 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)
1247 state->flag |= FLAG_DSACK_TLP;
1248
1249 WRITE_ONCE(tp->dsack_dups, tp->dsack_dups + dup_segs);
1250 /* Skip the DSACK if dup segs weren't retransmitted by sender */
1251 if (tp->dsack_dups > tp->total_retrans)
1252 return 0;
1253
1254 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
1255 /* We increase the RACK ordering window in rounds where we receive
1256 * DSACKs that may have been due to reordering causing RACK to trigger
1257 * a spurious fast recovery. Thus RACK ignores DSACKs that happen
1258 * without having seen reordering, or that match TLP probes (TLP
1259 * is timer-driven, not triggered by RACK).
1260 */
1261 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
1262 tp->rack.dsack_seen = 1;
1263
1264 state->flag |= FLAG_DSACKING_ACK;
1265 /* A spurious retransmission is delivered */
1266 state->sack_delivered += dup_segs;
1267
1268 return dup_segs;
1269 }
1270
1271 /* It's reordering when higher sequence was delivered (i.e. sacked) before
1272 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
1273 * distance is approximated in full-mss packet distance ("reordering").
1274 */
tcp_check_sack_reordering(struct sock * sk,const u32 low_seq,const int ts)1275 static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
1276 const int ts)
1277 {
1278 struct tcp_sock *tp = tcp_sk(sk);
1279 const u32 mss = tp->mss_cache;
1280 u32 fack, metric;
1281
1282 fack = tcp_highest_sack_seq(tp);
1283 if (!before(low_seq, fack))
1284 return;
1285
1286 metric = fack - low_seq;
1287 if ((metric > tp->reordering * mss) && mss) {
1288 #if FASTRETRANS_DEBUG > 1
1289 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
1290 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
1291 tp->reordering,
1292 0,
1293 tp->sacked_out,
1294 tp->undo_marker ? tp->undo_retrans : 0);
1295 #endif
1296 WRITE_ONCE(tp->reordering,
1297 min_t(u32, (metric + mss - 1) / mss,
1298 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
1299 }
1300
1301 /* This exciting event is worth to be remembered. 8) */
1302 WRITE_ONCE(tp->reord_seen, tp->reord_seen + 1);
1303 NET_INC_STATS(sock_net(sk),
1304 ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
1305 }
1306
1307 /* This must be called before lost_out or retrans_out are updated
1308 * on a new loss, because we want to know if all skbs previously
1309 * known to be lost have already been retransmitted, indicating
1310 * that this newly lost skb is our next skb to retransmit.
1311 */
tcp_verify_retransmit_hint(struct tcp_sock * tp,struct sk_buff * skb)1312 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
1313 {
1314 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
1315 (tp->retransmit_skb_hint &&
1316 before(TCP_SKB_CB(skb)->seq,
1317 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
1318 tp->retransmit_skb_hint = skb;
1319 }
1320
1321 /* Sum the number of packets on the wire we have marked as lost, and
1322 * notify the congestion control module that the given skb was marked lost.
1323 */
tcp_notify_skb_loss_event(struct tcp_sock * tp,const struct sk_buff * skb)1324 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
1325 {
1326 tp->lost += tcp_skb_pcount(skb);
1327 }
1328
tcp_mark_skb_lost(struct sock * sk,struct sk_buff * skb)1329 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
1330 {
1331 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1332 struct tcp_sock *tp = tcp_sk(sk);
1333
1334 if (sacked & TCPCB_SACKED_ACKED)
1335 return;
1336
1337 tcp_verify_retransmit_hint(tp, skb);
1338 if (sacked & TCPCB_LOST) {
1339 if (sacked & TCPCB_SACKED_RETRANS) {
1340 /* Account for retransmits that are lost again */
1341 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1342 tp->retrans_out -= tcp_skb_pcount(skb);
1343 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
1344 tcp_skb_pcount(skb));
1345 tcp_notify_skb_loss_event(tp, skb);
1346 }
1347 } else {
1348 tp->lost_out += tcp_skb_pcount(skb);
1349 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1350 tcp_notify_skb_loss_event(tp, skb);
1351 }
1352 }
1353
1354 /* This procedure tags the retransmission queue when SACKs arrive.
1355 *
1356 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
1357 * Packets in queue with these bits set are counted in variables
1358 * sacked_out, retrans_out and lost_out, correspondingly.
1359 *
1360 * Valid combinations are:
1361 * Tag InFlight Description
1362 * 0 1 - orig segment is in flight.
1363 * S 0 - nothing flies, orig reached receiver.
1364 * L 0 - nothing flies, orig lost by net.
1365 * R 2 - both orig and retransmit are in flight.
1366 * L|R 1 - orig is lost, retransmit is in flight.
1367 * S|R 1 - orig reached receiver, retrans is still in flight.
1368 * (L|S|R is logically valid, it could occur when L|R is sacked,
1369 * but it is equivalent to plain S and code short-circuits it to S.
1370 * L|S is logically invalid, it would mean -1 packet in flight 8))
1371 *
1372 * These 6 states form finite state machine, controlled by the following events:
1373 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
1374 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
1375 * 3. Loss detection event of two flavors:
1376 * A. Scoreboard estimator decided the packet is lost.
1377 * A'. Reno "three dupacks" marks head of queue lost.
1378 * B. SACK arrives sacking SND.NXT at the moment, when the
1379 * segment was retransmitted.
1380 * 4. D-SACK added new rule: D-SACK changes any tag to S.
1381 *
1382 * It is pleasant to note, that state diagram turns out to be commutative,
1383 * so that we are allowed not to be bothered by order of our actions,
1384 * when multiple events arrive simultaneously. (see the function below).
1385 *
1386 * Reordering detection.
1387 * --------------------
1388 * Reordering metric is maximal distance, which a packet can be displaced
1389 * in packet stream. With SACKs we can estimate it:
1390 *
1391 * 1. SACK fills old hole and the corresponding segment was not
1392 * ever retransmitted -> reordering. Alas, we cannot use it
1393 * when segment was retransmitted.
1394 * 2. The last flaw is solved with D-SACK. D-SACK arrives
1395 * for retransmitted and already SACKed segment -> reordering..
1396 * Both of these heuristics are not used in Loss state, when we cannot
1397 * account for retransmits accurately.
1398 *
1399 * SACK block validation.
1400 * ----------------------
1401 *
1402 * SACK block range validation checks that the received SACK block fits to
1403 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
1404 * Note that SND.UNA is not included to the range though being valid because
1405 * it means that the receiver is rather inconsistent with itself reporting
1406 * SACK reneging when it should advance SND.UNA. Such SACK block this is
1407 * perfectly valid, however, in light of RFC2018 which explicitly states
1408 * that "SACK block MUST reflect the newest segment. Even if the newest
1409 * segment is going to be discarded ...", not that it looks very clever
1410 * in case of head skb. Due to potentional receiver driven attacks, we
1411 * choose to avoid immediate execution of a walk in write queue due to
1412 * reneging and defer head skb's loss recovery to standard loss recovery
1413 * procedure that will eventually trigger (nothing forbids us doing this).
1414 *
1415 * Implements also blockage to start_seq wrap-around. Problem lies in the
1416 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
1417 * there's no guarantee that it will be before snd_nxt (n). The problem
1418 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
1419 * wrap (s_w):
1420 *
1421 * <- outs wnd -> <- wrapzone ->
1422 * u e n u_w e_w s n_w
1423 * | | | | | | |
1424 * |<------------+------+----- TCP seqno space --------------+---------->|
1425 * ...-- <2^31 ->| |<--------...
1426 * ...---- >2^31 ------>| |<--------...
1427 *
1428 * Current code wouldn't be vulnerable but it's better still to discard such
1429 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
1430 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
1431 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1432 * equal to the ideal case (infinite seqno space without wrap caused issues).
1433 *
1434 * With D-SACK the lower bound is extended to cover sequence space below
1435 * SND.UNA down to undo_marker, which is the last point of interest. Yet
1436 * again, D-SACK block must not to go across snd_una (for the same reason as
1437 * for the normal SACK blocks, explained above). But there all simplicity
1438 * ends, TCP might receive valid D-SACKs below that. As long as they reside
1439 * fully below undo_marker they do not affect behavior in anyway and can
1440 * therefore be safely ignored. In rare cases (which are more or less
1441 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
1442 * fragmentation and packet reordering past skb's retransmission. To consider
1443 * them correctly, the acceptable range must be extended even more though
1444 * the exact amount is rather hard to quantify. However, tp->max_window can
1445 * be used as an exaggerated estimate.
1446 */
tcp_is_sackblock_valid(struct tcp_sock * tp,bool is_dsack,u32 start_seq,u32 end_seq)1447 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
1448 u32 start_seq, u32 end_seq)
1449 {
1450 /* Too far in future, or reversed (interpretation is ambiguous) */
1451 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1452 return false;
1453
1454 /* Nasty start_seq wrap-around check (see comments above) */
1455 if (!before(start_seq, tp->snd_nxt))
1456 return false;
1457
1458 /* In outstanding window? ...This is valid exit for D-SACKs too.
1459 * start_seq == snd_una is non-sensical (see comments above)
1460 */
1461 if (after(start_seq, tp->snd_una))
1462 return true;
1463
1464 if (!is_dsack || !tp->undo_marker)
1465 return false;
1466
1467 /* ...Then it's D-SACK, and must reside below snd_una completely */
1468 if (after(end_seq, tp->snd_una))
1469 return false;
1470
1471 if (!before(start_seq, tp->undo_marker))
1472 return true;
1473
1474 /* Too old */
1475 if (!after(end_seq, tp->undo_marker))
1476 return false;
1477
1478 /* Undo_marker boundary crossing (overestimates a lot). Known already:
1479 * start_seq < undo_marker and end_seq >= undo_marker.
1480 */
1481 return !before(start_seq, end_seq - tp->max_window);
1482 }
1483
tcp_check_dsack(struct sock * sk,const struct sk_buff * ack_skb,struct tcp_sack_block_wire * sp,int num_sacks,u32 prior_snd_una,struct tcp_sacktag_state * state)1484 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1485 struct tcp_sack_block_wire *sp, int num_sacks,
1486 u32 prior_snd_una, struct tcp_sacktag_state *state)
1487 {
1488 struct tcp_sock *tp = tcp_sk(sk);
1489 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1490 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1491 u32 dup_segs;
1492
1493 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1494 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1495 } else if (num_sacks > 1) {
1496 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1497 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1498
1499 if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
1500 return false;
1501 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
1502 } else {
1503 return false;
1504 }
1505
1506 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
1507 if (!dup_segs) { /* Skip dubious DSACK */
1508 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
1509 return false;
1510 }
1511
1512 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
1513
1514 /* D-SACK for already forgotten data... Do dumb counting. */
1515 if (tp->undo_marker && tp->undo_retrans > 0 &&
1516 !after(end_seq_0, prior_snd_una) &&
1517 after(end_seq_0, tp->undo_marker))
1518 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);
1519
1520 return true;
1521 }
1522
1523 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
1524 * the incoming SACK may not exactly match but we can find smaller MSS
1525 * aligned portion of it that matches. Therefore we might need to fragment
1526 * which may fail and creates some hassle (caller must handle error case
1527 * returns).
1528 *
1529 * FIXME: this could be merged to shift decision code
1530 */
tcp_match_skb_to_sack(struct sock * sk,struct sk_buff * skb,u32 start_seq,u32 end_seq)1531 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1532 u32 start_seq, u32 end_seq)
1533 {
1534 int err;
1535 bool in_sack;
1536 unsigned int pkt_len;
1537 unsigned int mss;
1538
1539 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1540 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1541
1542 if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1543 after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1544 mss = tcp_skb_mss(skb);
1545 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1546
1547 if (!in_sack) {
1548 pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1549 if (pkt_len < mss)
1550 pkt_len = mss;
1551 } else {
1552 pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1553 if (pkt_len < mss)
1554 return -EINVAL;
1555 }
1556
1557 /* Round if necessary so that SACKs cover only full MSSes
1558 * and/or the remaining small portion (if present)
1559 */
1560 if (pkt_len > mss) {
1561 unsigned int new_len = (pkt_len / mss) * mss;
1562 if (!in_sack && new_len < pkt_len)
1563 new_len += mss;
1564 pkt_len = new_len;
1565 }
1566
1567 if (pkt_len >= skb->len && !in_sack)
1568 return 0;
1569
1570 err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
1571 pkt_len, mss, GFP_ATOMIC);
1572 if (err < 0)
1573 return err;
1574 }
1575
1576 return in_sack;
1577 }
1578
1579 /* Record the most recently (re)sent time among the (s)acked packets
1580 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
1581 * draft-cheng-tcpm-rack-00.txt
1582 */
1583 static void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
1584 u32 end_seq, u64 xmit_time)
1585 {
1586 u32 rtt_us;
1587
1588 rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
1589 if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
1590 /* If the sacked packet was retransmitted, it's ambiguous
1591 * whether the retransmission or the original (or the prior
1592 * retransmission) was sacked.
1593 *
1594 * If the original is lost, there is no ambiguity. Otherwise
1595 * we assume the original can be delayed up to aRTT + min_rtt.
1596 * The aRTT term is bounded by the fast recovery or timeout,
1597 * so it's at least one RTT (i.e., retransmission is at least
1598 * an RTT later).
1599 */
1600 return;
1601 }
1602 tp->rack.advanced = 1;
1603 tp->rack.rtt_us = rtt_us;
1604 if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
1605 end_seq, tp->rack.end_seq)) {
1606 tp->rack.mstamp = xmit_time;
1607 tp->rack.end_seq = end_seq;
1608 }
1609 }
1610
1611 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1612 static u8 tcp_sacktag_one(struct sock *sk,
1613 struct tcp_sacktag_state *state, u8 sacked,
1614 u32 start_seq, u32 end_seq,
1615 int dup_sack, int pcount, u32 plen,
1616 u64 xmit_time)
1617 {
1618 struct tcp_sock *tp = tcp_sk(sk);
1619
1620 /* Account D-SACK for retransmitted packet. */
1621 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1622 if (tp->undo_marker && tp->undo_retrans > 0 &&
1623 after(end_seq, tp->undo_marker))
1624 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
1625 if ((sacked & TCPCB_SACKED_ACKED) &&
1626 before(start_seq, state->reord))
1627 state->reord = start_seq;
1628 }
1629
1630 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1631 if (!after(end_seq, tp->snd_una))
1632 return sacked;
1633
1634 if (!(sacked & TCPCB_SACKED_ACKED)) {
1635 tcp_rack_advance(tp, sacked, end_seq, xmit_time);
1636
1637 if (sacked & TCPCB_SACKED_RETRANS) {
1638 /* If the segment is not tagged as lost,
1639 * we do not clear RETRANS, believing
1640 * that retransmission is still in flight.
1641 */
1642 if (sacked & TCPCB_LOST) {
1643 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1644 tp->lost_out -= pcount;
1645 tp->retrans_out -= pcount;
1646 }
1647 } else {
1648 if (!(sacked & TCPCB_RETRANS)) {
1649 /* New SACK for a non-retransmitted frame,
1650 * which was in a hole. It indicates reordering.
1651 */
1652 if (before(start_seq,
1653 tcp_highest_sack_seq(tp)) &&
1654 before(start_seq, state->reord))
1655 state->reord = start_seq;
1656
1657 if (!after(end_seq, tp->high_seq))
1658 state->flag |= FLAG_ORIG_SACK_ACKED;
1659 if (state->first_sackt == 0)
1660 state->first_sackt = xmit_time;
1661 state->last_sackt = xmit_time;
1662 }
1663
1664 if (sacked & TCPCB_LOST) {
1665 sacked &= ~TCPCB_LOST;
1666 tp->lost_out -= pcount;
1667 }
1668 }
1669
1670 sacked |= TCPCB_SACKED_ACKED;
1671 state->flag |= FLAG_DATA_SACKED;
1672 tp->sacked_out += pcount;
1673 /* Out-of-order packets delivered */
1674 state->sack_delivered += pcount;
1675 state->delivered_bytes += plen;
1676 }
1677
1678 /* D-SACK. We can detect redundant retransmission in S|R and plain R
1679 * frames and clear it. undo_retrans is decreased above, L|R frames
1680 * are accounted above as well.
1681 */
1682 if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1683 sacked &= ~TCPCB_SACKED_RETRANS;
1684 tp->retrans_out -= pcount;
1685 }
1686
1687 return sacked;
1688 }
1689
1690 /* The bandwidth estimator estimates the rate at which the network
1691 * can currently deliver outbound data packets for this flow. At a high
1692 * level, it operates by taking a delivery rate sample for each ACK.
1693 *
1694 * A rate sample records the rate at which the network delivered packets
1695 * for this flow, calculated over the time interval between the transmission
1696 * of a data packet and the acknowledgment of that packet.
1697 *
1698 * Specifically, over the interval between each transmit and corresponding ACK,
1699 * the estimator generates a delivery rate sample. Typically it uses the rate
1700 * at which packets were acknowledged. However, the approach of using only the
1701 * acknowledgment rate faces a challenge under the prevalent ACK decimation or
1702 * compression: packets can temporarily appear to be delivered much quicker
1703 * than the bottleneck rate. Since it is physically impossible to do that in a
1704 * sustained fashion, when the estimator notices that the ACK rate is faster
1705 * than the transmit rate, it uses the latter:
1706 *
1707 * send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
1708 * ack_rate = #pkts_delivered/(last_ack_time - first_ack_time)
1709 * bw = min(send_rate, ack_rate)
1710 *
1711 * Notice the estimator essentially estimates the goodput, not always the
1712 * network bottleneck link rate when the sending or receiving is limited by
1713 * other factors like applications or receiver window limits. The estimator
1714 * deliberately avoids using the inter-packet spacing approach because that
1715 * approach requires a large number of samples and sophisticated filtering.
1716 *
1717 * TCP flows can often be application-limited in request/response workloads.
1718 * The estimator marks a bandwidth sample as application-limited if there
1719 * was some moment during the sampled window of packets when there was no data
1720 * ready to send in the write queue.
1721 */
1722
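/* Illustrative sketch (not part of the kernel, disabled): the helper below
 * only mirrors the formulas in the comment above with placeholder names;
 * the real computation is spread across tcp_rate_skb_delivered() and
 * tcp_rate_gen() below. It shows how bw = min(send_rate, ack_rate) falls
 * out of dividing the delivered count by the longer of the two phases.
 */
#if 0
static unsigned long long example_delivery_rate_pps(unsigned int pkts_delivered,
						     unsigned long long snd_us,
						     unsigned long long ack_us)
{
	/* Using the longer phase yields the smaller, safer rate estimate. */
	unsigned long long interval_us = snd_us > ack_us ? snd_us : ack_us;

	if (!interval_us)
		return 0;
	/* packets per second = delivered / (interval in seconds) */
	return (unsigned long long)pkts_delivered * 1000000ULL / interval_us;
}
#endif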
1723 /* Update the connection delivery information and generate a rate sample. */
1724 static void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1725 bool is_sack_reneg, struct rate_sample *rs)
1726 {
1727 struct tcp_sock *tp = tcp_sk(sk);
1728 u32 snd_us, ack_us;
1729
1730 /* Clear app limited if bubble is acked and gone. */
1731 if (tp->app_limited && after(tp->delivered, tp->app_limited))
1732 tp->app_limited = 0;
1733
1734 /* TODO: there are multiple places throughout tcp_ack() to get
1735 * current time. Refactor the code using a new "tcp_acktag_state"
1736 * to carry current time, flags, stats like "tcp_sacktag_state".
1737 */
1738 if (delivered)
1739 tp->delivered_mstamp = tp->tcp_mstamp;
1740
1741 rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
1742 rs->losses = lost; /* freshly marked lost */
1743 /* Return an invalid sample if no timing information is available or
1744 * in recovery from loss with SACK reneging. Rate samples taken during
1745 * a SACK reneging event may overestimate bw by including packets that
1746 * were SACKed before the reneg.
1747 */
1748 if (!rs->prior_mstamp || is_sack_reneg) {
1749 rs->delivered = -1;
1750 rs->interval_us = -1;
1751 return;
1752 }
1753 rs->delivered = tp->delivered - rs->prior_delivered;
1754
1755 rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
1756 /* delivered_ce occupies less than 32 bits in the skb control block */
1757 rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
1758
1759 /* Model sending data and receiving ACKs as separate pipeline phases
1760 * for a window. Usually the ACK phase is longer, but with ACK
1761 * compression the send phase can be longer. To be safe we use the
1762 * longer phase.
1763 */
1764 snd_us = rs->interval_us; /* send phase */
1765 ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
1766 rs->prior_mstamp); /* ack phase */
1767 rs->interval_us = max(snd_us, ack_us);
1768
1769 /* Record both segment send and ack receive intervals */
1770 rs->snd_interval_us = snd_us;
1771 rs->rcv_interval_us = ack_us;
1772
1773 /* Normally we expect interval_us >= min-rtt.
1774 * Note that rate may still be over-estimated when a spuriously
1775 * retransmitted skb was first (s)acked because "interval_us"
1776 * is under-estimated (up to an RTT). However, continuously
1777 * measuring the delivery rate during loss recovery is crucial
1778 * for connections that suffer heavy or prolonged losses.
1779 */
1780 if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
1781 if (!rs->is_retrans)
1782 pr_debug("tcp rate: %ld %d %u %u %u\n",
1783 rs->interval_us, rs->delivered,
1784 inet_csk(sk)->icsk_ca_state,
1785 tp->rx_opt.sack_ok, tcp_min_rtt(tp));
1786 rs->interval_us = -1;
1787 return;
1788 }
1789
1790 /* Record the last non-app-limited or the highest app-limited bw */
1791 if (!rs->is_app_limited ||
1792 ((u64)rs->delivered * tp->rate_interval_us >=
1793 (u64)tp->rate_delivered * rs->interval_us)) {
1794 tp->rate_delivered = rs->delivered;
1795 tp->rate_interval_us = rs->interval_us;
1796 tp->rate_app_limited = rs->is_app_limited;
1797 }
1798 }
1799
1800 /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
1801 * delivery information when the skb was last transmitted.
1802 *
1803 * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
1804 * called multiple times. We favor the information from the most recently
1805 * sent skb, i.e., the skb with the most recently sent time and the highest
1806 * sequence.
1807 */
1808 static void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1809 struct rate_sample *rs)
1810 {
1811 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
1812 struct tcp_sock *tp = tcp_sk(sk);
1813 u64 tx_tstamp;
1814
1815 if (!scb->tx.delivered_mstamp)
1816 return;
1817
1818 tx_tstamp = tcp_skb_timestamp_us(skb);
1819 if (!rs->prior_delivered ||
1820 tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
1821 scb->end_seq, rs->last_end_seq)) {
1822 rs->prior_delivered_ce = scb->tx.delivered_ce;
1823 rs->prior_delivered = scb->tx.delivered;
1824 rs->prior_mstamp = scb->tx.delivered_mstamp;
1825 rs->is_app_limited = scb->tx.is_app_limited;
1826 rs->is_retrans = scb->sacked & TCPCB_RETRANS;
1827 rs->last_end_seq = scb->end_seq;
1828
1829 /* Record send time of most recently ACKed packet: */
1830 tp->first_tx_mstamp = tx_tstamp;
1831 /* Find the duration of the "send phase" of this window: */
1832 rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
1833 scb->tx.first_tx_mstamp);
1834
1835 }
1836 /* Mark off the skb delivered once it's sacked to avoid being
1837 * used again when it's cumulatively acked. For acked packets
1838 * we don't need to reset since it'll be freed soon.
1839 */
1840 if (scb->sacked & TCPCB_SACKED_ACKED)
1841 scb->tx.delivered_mstamp = 0;
1842 }
1843
1844 /* Shift newly-SACKed bytes from this skb to the immediately previous
1845 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1846 */
1847 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1848 struct sk_buff *skb,
1849 struct tcp_sacktag_state *state,
1850 unsigned int pcount, int shifted, int mss,
1851 bool dup_sack)
1852 {
1853 struct tcp_sock *tp = tcp_sk(sk);
1854 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1855 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1856
1857 BUG_ON(!pcount);
1858
1859 /* Adjust counters and hints for the newly sacked sequence
1860 * range but discard the return value since prev is already
1861 * marked. We must tag the range first because the seq
1862 * advancement below implicitly advances
1863 * tcp_highest_sack_seq() when skb is highest_sack.
1864 */
1865 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1866 start_seq, end_seq, dup_sack, pcount, skb->len,
1867 tcp_skb_timestamp_us(skb));
1868 tcp_rate_skb_delivered(sk, skb, state->rate);
1869
1870 TCP_SKB_CB(prev)->end_seq += shifted;
1871 TCP_SKB_CB(skb)->seq += shifted;
1872
1873 tcp_skb_pcount_add(prev, pcount);
1874 WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
1875 tcp_skb_pcount_add(skb, -pcount);
1876
1877 /* When we're adding to gso_segs == 1, gso_size will be zero,
1878 * in theory this shouldn't be necessary, but as long as DSACK
1879 * code can come after this skb later on, it's better to keep
1880 * setting gso_size to something.
1881 */
1882 if (!TCP_SKB_CB(prev)->tcp_gso_size)
1883 TCP_SKB_CB(prev)->tcp_gso_size = mss;
1884
1885 /* CHECKME: To clear or not to clear? Mimics normal skb currently */
1886 if (tcp_skb_pcount(skb) <= 1)
1887 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1888
1889 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1890 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1891
1892 if (skb->len > 0) {
1893 BUG_ON(!tcp_skb_pcount(skb));
1894 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1895 return false;
1896 }
1897
1898 /* Whole SKB was eaten :-) */
1899
1900 if (skb == tp->retransmit_skb_hint)
1901 tp->retransmit_skb_hint = prev;
1902
1903 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1904 TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
1905 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1906 TCP_SKB_CB(prev)->end_seq++;
1907
1908 if (skb == tcp_highest_sack(sk))
1909 tcp_advance_highest_sack(sk, skb);
1910
1911 tcp_skb_collapse_tstamp(prev, skb);
1912 if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
1913 TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
1914
1915 tcp_rtx_queue_unlink_and_free(skb, sk);
1916
1917 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
1918
1919 return true;
1920 }
1921
1922 /* I wish gso_size would have a bit more sane initialization than
1923 * something-or-zero which complicates things
1924 */
1925 static int tcp_skb_seglen(const struct sk_buff *skb)
1926 {
1927 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1928 }
1929
1930 /* Shifting pages past head area doesn't work */
1931 static int skb_can_shift(const struct sk_buff *skb)
1932 {
1933 return !skb_headlen(skb) && skb_is_nonlinear(skb);
1934 }
1935
1936 int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
1937 int pcount, int shiftlen)
1938 {
1939 /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
1940 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
1941 * to make sure not storing more than 65535 * 8 bytes per skb,
1942 * even if current MSS is bigger.
1943 */
1944 if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
1945 return 0;
1946 if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
1947 return 0;
1948 return skb_shift(to, from, shiftlen);
1949 }
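/* Worked example of the bounds above (illustrative): tcp_gso_segs is a u16,
 * so an aggregate skb can never carry more than 65535 segments, and with the
 * TCP_MIN_GSO_SIZE floor of 8 bytes that caps a shifted skb's payload at
 * 65535 * 8 = 524280 bytes, no matter how large the current MSS is.
 */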
1950
1951 /* Try collapsing SACK blocks spanning across multiple skbs to a single
1952 * skb.
1953 */
1954 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1955 struct tcp_sacktag_state *state,
1956 u32 start_seq, u32 end_seq,
1957 bool dup_sack)
1958 {
1959 struct tcp_sock *tp = tcp_sk(sk);
1960 struct sk_buff *prev;
1961 int mss;
1962 int pcount = 0;
1963 int len;
1964 int in_sack;
1965
1966 /* Normally R but no L won't result in plain S */
1967 if (!dup_sack &&
1968 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1969 goto fallback;
1970 if (!skb_can_shift(skb))
1971 goto fallback;
1972 /* This frame is about to be dropped (was ACKed). */
1973 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1974 goto fallback;
1975
1976 /* Can only happen with delayed DSACK + discard craziness */
1977 prev = skb_rb_prev(skb);
1978 if (!prev)
1979 goto fallback;
1980
1981 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1982 goto fallback;
1983
1984 if (!tcp_skb_can_collapse(prev, skb))
1985 goto fallback;
1986
1987 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1988 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1989
1990 if (in_sack) {
1991 len = skb->len;
1992 pcount = tcp_skb_pcount(skb);
1993 mss = tcp_skb_seglen(skb);
1994
1995 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1996 * drop this restriction as unnecessary
1997 */
1998 if (mss != tcp_skb_seglen(prev))
1999 goto fallback;
2000 } else {
2001 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
2002 goto noop;
2003 /* CHECKME: Is this the non-MSS split case only? This will
2004 * cause skipped skbs due to the advancing loop, btw; the original
2005 * has that feature too
2006 */
2007 if (tcp_skb_pcount(skb) <= 1)
2008 goto noop;
2009
2010 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
2011 if (!in_sack) {
2012 /* TODO: head merge to next could be attempted here
2013 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
2014 * though it might not be worth the additional hassle
2015 *
2016 * ...we can probably just fall back to what was done
2017 * previously. We could try merging non-SACKed ones
2018 * as well, but it probably isn't going to pay off
2019 * because later SACKs might again split them, and
2020 * it would make skb timestamp tracking a considerably
2021 * harder problem.
2022 */
2023 goto fallback;
2024 }
2025
2026 len = end_seq - TCP_SKB_CB(skb)->seq;
2027 BUG_ON(len < 0);
2028 BUG_ON(len > skb->len);
2029
2030 /* MSS boundaries should be honoured or else pcount will
2031 * severely break even though it makes things a bit trickier.
2032 * Optimize the common case to avoid most of the divides
2033 */
2034 mss = tcp_skb_mss(skb);
2035
2036 /* TODO: Fix DSACKs to not fragment already SACKed and we can
2037 * drop this restriction as unnecessary
2038 */
2039 if (mss != tcp_skb_seglen(prev))
2040 goto fallback;
2041
2042 if (len == mss) {
2043 pcount = 1;
2044 } else if (len < mss) {
2045 goto noop;
2046 } else {
2047 pcount = len / mss;
2048 len = pcount * mss;
2049 }
2050 }
2051
2052 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
2053 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
2054 goto fallback;
2055
2056 if (!tcp_skb_shift(prev, skb, pcount, len))
2057 goto fallback;
2058 if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
2059 goto out;
2060
2061 /* A filled hole allows collapsing with the next skb as well; this is very
2062 * useful when a hole-on-every-nth-skb pattern happens
2063 */
2064 skb = skb_rb_next(prev);
2065 if (!skb)
2066 goto out;
2067
2068 if (!skb_can_shift(skb) ||
2069 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
2070 (mss != tcp_skb_seglen(skb)))
2071 goto out;
2072
2073 if (!tcp_skb_can_collapse(prev, skb))
2074 goto out;
2075 len = skb->len;
2076 pcount = tcp_skb_pcount(skb);
2077 if (tcp_skb_shift(prev, skb, pcount, len))
2078 tcp_shifted_skb(sk, prev, skb, state, pcount,
2079 len, mss, 0);
2080
2081 out:
2082 return prev;
2083
2084 noop:
2085 return skb;
2086
2087 fallback:
2088 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
2089 return NULL;
2090 }
2091
2092 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
2093 struct tcp_sack_block *next_dup,
2094 struct tcp_sacktag_state *state,
2095 u32 start_seq, u32 end_seq,
2096 bool dup_sack_in)
2097 {
2098 struct tcp_sock *tp = tcp_sk(sk);
2099 struct sk_buff *tmp;
2100
2101 skb_rbtree_walk_from(skb) {
2102 int in_sack = 0;
2103 bool dup_sack = dup_sack_in;
2104
2105 /* queue is in-order => we can short-circuit the walk early */
2106 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
2107 break;
2108
2109 if (next_dup &&
2110 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
2111 in_sack = tcp_match_skb_to_sack(sk, skb,
2112 next_dup->start_seq,
2113 next_dup->end_seq);
2114 if (in_sack > 0)
2115 dup_sack = true;
2116 }
2117
2118 /* skb reference here is a bit tricky to get right, since
2119 * shifting can eat and free both this skb and the next,
2120 * so not even the _safe variant of the loop is enough.
2121 */
2122 if (in_sack <= 0) {
2123 tmp = tcp_shift_skb_data(sk, skb, state,
2124 start_seq, end_seq, dup_sack);
2125 if (tmp) {
2126 if (tmp != skb) {
2127 skb = tmp;
2128 continue;
2129 }
2130
2131 in_sack = 0;
2132 } else {
2133 in_sack = tcp_match_skb_to_sack(sk, skb,
2134 start_seq,
2135 end_seq);
2136 }
2137 }
2138
2139 if (unlikely(in_sack < 0))
2140 break;
2141
2142 if (in_sack) {
2143 TCP_SKB_CB(skb)->sacked =
2144 tcp_sacktag_one(sk,
2145 state,
2146 TCP_SKB_CB(skb)->sacked,
2147 TCP_SKB_CB(skb)->seq,
2148 TCP_SKB_CB(skb)->end_seq,
2149 dup_sack,
2150 tcp_skb_pcount(skb),
2151 skb->len,
2152 tcp_skb_timestamp_us(skb));
2153 tcp_rate_skb_delivered(sk, skb, state->rate);
2154 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2155 list_del_init(&skb->tcp_tsorted_anchor);
2156
2157 if (!before(TCP_SKB_CB(skb)->seq,
2158 tcp_highest_sack_seq(tp)))
2159 tcp_advance_highest_sack(sk, skb);
2160 }
2161 }
2162 return skb;
2163 }
2164
2165 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
2166 {
2167 struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
2168 struct sk_buff *skb;
2169
2170 while (*p) {
2171 parent = *p;
2172 skb = rb_to_skb(parent);
2173 if (before(seq, TCP_SKB_CB(skb)->seq)) {
2174 p = &parent->rb_left;
2175 continue;
2176 }
2177 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
2178 p = &parent->rb_right;
2179 continue;
2180 }
2181 return skb;
2182 }
2183 return NULL;
2184 }
2185
2186 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
2187 u32 skip_to_seq)
2188 {
2189 if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
2190 return skb;
2191
2192 return tcp_sacktag_bsearch(sk, skip_to_seq);
2193 }
2194
2195 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
2196 struct sock *sk,
2197 struct tcp_sack_block *next_dup,
2198 struct tcp_sacktag_state *state,
2199 u32 skip_to_seq)
2200 {
2201 if (!next_dup)
2202 return skb;
2203
2204 if (before(next_dup->start_seq, skip_to_seq)) {
2205 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
2206 skb = tcp_sacktag_walk(skb, sk, NULL, state,
2207 next_dup->start_seq, next_dup->end_seq,
2208 1);
2209 }
2210
2211 return skb;
2212 }
2213
2214 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
2215 {
2216 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
2217 }
2218
2219 static int
2220 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
2221 u32 prior_snd_una, struct tcp_sacktag_state *state)
2222 {
2223 struct tcp_sock *tp = tcp_sk(sk);
2224 const unsigned char *ptr = (skb_transport_header(ack_skb) +
2225 TCP_SKB_CB(ack_skb)->sacked);
2226 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
2227 struct tcp_sack_block sp[TCP_NUM_SACKS];
2228 struct tcp_sack_block *cache;
2229 struct sk_buff *skb;
2230 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
2231 int used_sacks;
2232 bool found_dup_sack = false;
2233 int i, j;
2234 int first_sack_index;
2235
2236 state->flag = 0;
2237 state->reord = tp->snd_nxt;
2238
2239 if (!tp->sacked_out)
2240 tcp_highest_sack_reset(sk);
2241
2242 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
2243 num_sacks, prior_snd_una, state);
2244
2245 /* Eliminate too old ACKs, but take into
2246 * account more or less fresh ones, they can
2247 * contain valid SACK info.
2248 */
2249 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
2250 return 0;
2251
2252 if (!tp->packets_out)
2253 goto out;
2254
2255 used_sacks = 0;
2256 first_sack_index = 0;
2257 for (i = 0; i < num_sacks; i++) {
2258 bool dup_sack = !i && found_dup_sack;
2259
2260 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
2261 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
2262
2263 if (!tcp_is_sackblock_valid(tp, dup_sack,
2264 sp[used_sacks].start_seq,
2265 sp[used_sacks].end_seq)) {
2266 int mib_idx;
2267
2268 if (dup_sack) {
2269 if (!tp->undo_marker)
2270 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
2271 else
2272 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
2273 } else {
2274 /* Don't count olds caused by ACK reordering */
2275 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
2276 !after(sp[used_sacks].end_seq, tp->snd_una))
2277 continue;
2278 mib_idx = LINUX_MIB_TCPSACKDISCARD;
2279 }
2280
2281 NET_INC_STATS(sock_net(sk), mib_idx);
2282 if (i == 0)
2283 first_sack_index = -1;
2284 continue;
2285 }
2286
2287 /* Ignore very old stuff early */
2288 if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
2289 if (i == 0)
2290 first_sack_index = -1;
2291 continue;
2292 }
2293
2294 used_sacks++;
2295 }
2296
2297 /* order SACK blocks to allow in order walk of the retrans queue */
2298 for (i = used_sacks - 1; i > 0; i--) {
2299 for (j = 0; j < i; j++) {
2300 if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
2301 swap(sp[j], sp[j + 1]);
2302
2303 /* Track where the first SACK block goes to */
2304 if (j == first_sack_index)
2305 first_sack_index = j + 1;
2306 }
2307 }
2308 }
2309
2310 state->mss_now = tcp_current_mss(sk);
2311 skb = NULL;
2312 i = 0;
2313
2314 if (!tp->sacked_out) {
2315 /* It's already past, so skip checking against it */
2316 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
2317 } else {
2318 cache = tp->recv_sack_cache;
2319 /* Skip empty blocks at the head of the cache */
2320 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
2321 !cache->end_seq)
2322 cache++;
2323 }
2324
2325 while (i < used_sacks) {
2326 u32 start_seq = sp[i].start_seq;
2327 u32 end_seq = sp[i].end_seq;
2328 bool dup_sack = (found_dup_sack && (i == first_sack_index));
2329 struct tcp_sack_block *next_dup = NULL;
2330
2331 if (found_dup_sack && ((i + 1) == first_sack_index))
2332 next_dup = &sp[i + 1];
2333
2334 /* Skip too early cached blocks */
2335 while (tcp_sack_cache_ok(tp, cache) &&
2336 !before(start_seq, cache->end_seq))
2337 cache++;
2338
2339 /* Can we skip some work by looking at recv_sack_cache? */
2340 if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
2341 after(end_seq, cache->start_seq)) {
2342
2343 /* Head todo? */
2344 if (before(start_seq, cache->start_seq)) {
2345 skb = tcp_sacktag_skip(skb, sk, start_seq);
2346 skb = tcp_sacktag_walk(skb, sk, next_dup,
2347 state,
2348 start_seq,
2349 cache->start_seq,
2350 dup_sack);
2351 }
2352
2353 /* Rest of the block already fully processed? */
2354 if (!after(end_seq, cache->end_seq))
2355 goto advance_sp;
2356
2357 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
2358 state,
2359 cache->end_seq);
2360
2361 /* ...tail remains todo... */
2362 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
2363 /* ...but better entrypoint exists! */
2364 skb = tcp_highest_sack(sk);
2365 if (!skb)
2366 break;
2367 cache++;
2368 goto walk;
2369 }
2370
2371 skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
2372 /* Check overlap against next cached too (past this one already) */
2373 cache++;
2374 continue;
2375 }
2376
2377 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
2378 skb = tcp_highest_sack(sk);
2379 if (!skb)
2380 break;
2381 }
2382 skb = tcp_sacktag_skip(skb, sk, start_seq);
2383
2384 walk:
2385 skb = tcp_sacktag_walk(skb, sk, next_dup, state,
2386 start_seq, end_seq, dup_sack);
2387
2388 advance_sp:
2389 i++;
2390 }
2391
2392 /* Clear the head of the cache sack blocks so we can skip it next time */
2393 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
2394 tp->recv_sack_cache[i].start_seq = 0;
2395 tp->recv_sack_cache[i].end_seq = 0;
2396 }
2397 for (j = 0; j < used_sacks; j++)
2398 tp->recv_sack_cache[i++] = sp[j];
2399
2400 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
2401 tcp_check_sack_reordering(sk, state->reord, 0);
2402
2403 tcp_verify_left_out(tp);
2404 out:
2405
2406 #if FASTRETRANS_DEBUG > 0
2407 WARN_ON((int)tp->sacked_out < 0);
2408 WARN_ON((int)tp->lost_out < 0);
2409 WARN_ON((int)tp->retrans_out < 0);
2410 WARN_ON((int)tcp_packets_in_flight(tp) < 0);
2411 #endif
2412 return state->flag;
2413 }
2414
2415 /* Limits sacked_out so that sum with lost_out isn't ever larger than
2416 * packets_out. Returns false if a sacked_out adjustment wasn't necessary.
2417 */
2418 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
2419 {
2420 u32 holes;
2421
2422 holes = max(tp->lost_out, 1U);
2423 holes = min(holes, tp->packets_out);
2424
2425 if ((tp->sacked_out + holes) > tp->packets_out) {
2426 tp->sacked_out = tp->packets_out - holes;
2427 return true;
2428 }
2429 return false;
2430 }
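/* Worked example (illustrative): with packets_out = 10 and lost_out = 3,
 * holes = 3, so a dupack-derived sacked_out is clamped to 10 - 3 = 7;
 * anything larger would claim that more segments left the network than
 * were ever outstanding.
 */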
2431
2432 /* If we receive more dupacks than we expect when counting segments
2433 * under the assumption of no reordering, interpret this as reordering.
2434 * The only other reason could be a bug in the receiver TCP.
2435 */
2436 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
2437 {
2438 struct tcp_sock *tp = tcp_sk(sk);
2439
2440 if (!tcp_limit_reno_sacked(tp))
2441 return;
2442
2443 WRITE_ONCE(tp->reordering,
2444 min_t(u32, tp->packets_out + addend,
2445 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering)));
2446 WRITE_ONCE(tp->reord_seen, tp->reord_seen + 1);
2447 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
2448 }
2449
2450 /* Emulate SACKs for SACKless connection: account for a new dupack. */
2451
2452 static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
2453 {
2454 if (num_dupack) {
2455 struct tcp_sock *tp = tcp_sk(sk);
2456 u32 prior_sacked = tp->sacked_out;
2457 s32 delivered;
2458
2459 tp->sacked_out += num_dupack;
2460 tcp_check_reno_reordering(sk, 0);
2461 delivered = tp->sacked_out - prior_sacked;
2462 if (delivered > 0)
2463 tcp_count_delivered(tp, delivered, ece_ack);
2464 tcp_verify_left_out(tp);
2465 }
2466 }
2467
2468 /* Account for ACK, ACKing some data in Reno Recovery phase. */
2469
2470 static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
2471 {
2472 struct tcp_sock *tp = tcp_sk(sk);
2473
2474 if (acked > 0) {
2475 /* One ACK acked hole. The rest eat duplicate ACKs. */
2476 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
2477 ece_ack);
2478 if (acked - 1 >= tp->sacked_out)
2479 tp->sacked_out = 0;
2480 else
2481 tp->sacked_out -= acked - 1;
2482 }
2483 tcp_check_reno_reordering(sk, acked);
2484 tcp_verify_left_out(tp);
2485 }
2486
2487 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
2488 {
2489 tp->sacked_out = 0;
2490 }
2491
2492 void tcp_clear_retrans(struct tcp_sock *tp)
2493 {
2494 tp->retrans_out = 0;
2495 tp->lost_out = 0;
2496 tp->undo_marker = 0;
2497 tp->undo_retrans = -1;
2498 tp->sacked_out = 0;
2499 tp->rto_stamp = 0;
2500 tp->total_rto = 0;
2501 tp->total_rto_recoveries = 0;
2502 tp->total_rto_time = 0;
2503 }
2504
2505 static inline void tcp_init_undo(struct tcp_sock *tp)
2506 {
2507 tp->undo_marker = tp->snd_una;
2508
2509 /* Retransmission still in flight may cause DSACKs later. */
2510 /* First, account for regular retransmits in flight: */
2511 tp->undo_retrans = tp->retrans_out;
2512 /* Next, account for TLP retransmits in flight: */
2513 if (tp->tlp_high_seq && tp->tlp_retrans)
2514 tp->undo_retrans++;
2515 /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
2516 if (!tp->undo_retrans)
2517 tp->undo_retrans = -1;
2518 }
2519
2520 /* If we detect SACK reneging, forget all SACK information
2521 * and reset tags completely, otherwise preserve SACKs. If receiver
2522 * dropped its ofo queue, we will know this due to reneging detection.
2523 */
2524 static void tcp_timeout_mark_lost(struct sock *sk)
2525 {
2526 struct tcp_sock *tp = tcp_sk(sk);
2527 struct sk_buff *skb, *head;
2528 bool is_reneg; /* is receiver reneging on SACKs? */
2529
2530 head = tcp_rtx_queue_head(sk);
2531 is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
2532 if (is_reneg) {
2533 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2534 tp->sacked_out = 0;
2535 /* Mark SACK reneging until we recover from this loss event. */
2536 tp->is_sack_reneg = 1;
2537 } else if (tcp_is_reno(tp)) {
2538 tcp_reset_reno_sack(tp);
2539 }
2540
2541 skb = head;
2542 skb_rbtree_walk_from(skb) {
2543 if (is_reneg)
2544 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2545 else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0)
2546 continue; /* Don't mark recently sent ones lost yet */
2547 tcp_mark_skb_lost(sk, skb);
2548 }
2549 tcp_verify_left_out(tp);
2550 tcp_clear_all_retrans_hints(tp);
2551 }
2552
2553 /* Enter Loss state. */
2554 void tcp_enter_loss(struct sock *sk)
2555 {
2556 const struct inet_connection_sock *icsk = inet_csk(sk);
2557 struct tcp_sock *tp = tcp_sk(sk);
2558 struct net *net = sock_net(sk);
2559 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2560 u8 reordering;
2561
2562 tcp_timeout_mark_lost(sk);
2563
2564 /* Reduce ssthresh if it has not yet been made inside this window. */
2565 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2566 !after(tp->high_seq, tp->snd_una) ||
2567 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2568 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2569 tp->prior_cwnd = tcp_snd_cwnd(tp);
2570 WRITE_ONCE(tp->snd_ssthresh, icsk->icsk_ca_ops->ssthresh(sk));
2571 tcp_ca_event(sk, CA_EVENT_LOSS);
2572 tcp_init_undo(tp);
2573 }
2574 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
2575 tp->snd_cwnd_cnt = 0;
2576 tp->snd_cwnd_stamp = tcp_jiffies32;
2577
2578 /* Timeout in disordered state after receiving substantial DUPACKs
2579 * suggests that the degree of reordering is over-estimated.
2580 */
2581 reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
2582 if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2583 tp->sacked_out >= reordering)
2584 WRITE_ONCE(tp->reordering,
2585 min_t(unsigned int, tp->reordering, reordering));
2586
2587 tcp_set_ca_state(sk, TCP_CA_Loss);
2588 tp->high_seq = tp->snd_nxt;
2589 tp->tlp_high_seq = 0;
2590 tcp_ecn_queue_cwr(tp);
2591
2592 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2593 * loss recovery is underway except recurring timeout(s) on
2594 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2595 */
2596 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
2597 (new_recovery || icsk->icsk_retransmits) &&
2598 !inet_csk(sk)->icsk_mtup.probe_size;
2599 }
2600
2601 /* If ACK arrived pointing to a remembered SACK, it means that our
2602 * remembered SACKs do not reflect real state of receiver i.e.
2603 * receiver _host_ is heavily congested (or buggy).
2604 *
2605 * To avoid big spurious retransmission bursts due to transient SACK
2606 * scoreboard oddities that look like reneging, we give the receiver a
2607 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2608 * restore sanity to the SACK scoreboard. If the apparent reneging
2609 * persists until this RTO then we'll clear the SACK scoreboard.
2610 */
2611 static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
2612 {
2613 if (*ack_flag & FLAG_SACK_RENEGING &&
2614 *ack_flag & FLAG_SND_UNA_ADVANCED) {
2615 struct tcp_sock *tp = tcp_sk(sk);
2616 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2617 msecs_to_jiffies(10));
2618
2619 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, false);
2620 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
2621 return true;
2622 }
2623 return false;
2624 }
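/* Numeric example for the grace period above (illustrative): tp->srtt_us
 * holds the smoothed RTT left-shifted by 3 (i.e. 8 * SRTT in usec), so
 * srtt_us >> 4 is SRTT/2. For a 40 ms smoothed RTT, srtt_us = 320000 and
 * the reneging retransmit timer is armed max(20 ms, 10 ms) = 20 ms out.
 */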
2625
2626 /* Linux NewReno/SACK/ECN state machine.
2627 * --------------------------------------
2628 *
2629 * "Open" Normal state, no dubious events, fast path.
2630 * "Disorder" In all the respects it is "Open",
2631 * but requires a bit more attention. It is entered when
2632 * we see some SACKs or dupacks. It is split of "Open"
2633 * mainly to move some processing from fast path to slow one.
2634 * "CWR" CWND was reduced due to some Congestion Notification event.
2635 * It can be ECN, ICMP source quench, local device congestion.
2636 * "Recovery" CWND was reduced, we are fast-retransmitting.
2637 * "Loss" CWND was reduced due to RTO timeout or SACK reneging.
2638 *
2639 * tcp_fastretrans_alert() is entered:
2640 * - each incoming ACK, if state is not "Open"
2641 * - when arrived ACK is unusual, namely:
2642 * * SACK
2643 * * Duplicate ACK.
2644 * * ECN ECE.
2645 *
2646 * Counting packets in flight is pretty simple.
2647 *
2648 * in_flight = packets_out - left_out + retrans_out
2649 *
2650 * packets_out is SND.NXT-SND.UNA counted in packets.
2651 *
2652 * retrans_out is number of retransmitted segments.
2653 *
2654 * left_out is the number of segments that left the network but are not ACKed yet.
2655 *
2656 * left_out = sacked_out + lost_out
2657 *
2658 * sacked_out: Packets which arrived at the receiver out of order
2659 * and hence were not cumulatively ACKed. With SACKs this number is
2660 * simply the amount of SACKed data. Even without SACKs
2661 * it is easy to give a pretty reliable estimate of this number
2662 * by counting duplicate ACKs.
2663 *
2664 * lost_out: Packets lost by the network. TCP has no explicit
2665 * "loss notification" feedback from the network (for now).
2666 * It means that this number can only be _guessed_.
2667 * Actually, it is the heuristic used to predict losses that
2668 * distinguishes the different algorithms.
2669 *
2670 * E.g. after an RTO, when the whole queue is considered lost,
2671 * lost_out = packets_out and in_flight = retrans_out.
2672 *
2673 * Essentially, we have now a few algorithms detecting
2674 * lost packets.
2675 *
2676 * If the receiver supports SACK:
2677 *
2678 * RACK (RFC8985): RACK is a newer loss detection algorithm
2679 * (2017-) that checks timing instead of counting DUPACKs.
2680 * Essentially a packet is considered lost if it's not S/ACKed
2681 * after RTT + reordering_window, where both metrics are
2682 * dynamically measured and adjusted. This is implemented in
2683 * tcp_rack_mark_lost.
2684 *
2685 * If the receiver does not support SACK:
2686 *
2687 * NewReno (RFC6582): in Recovery we assume that one segment
2688 * is lost (classic Reno). While we are in Recovery and
2689 * a partial ACK arrives, we assume that one more packet
2690 * is lost (NewReno). These heuristics are the same in NewReno
2691 * and SACK.
2692 *
2693 * The really tricky (and requiring careful tuning) part of the algorithm
2694 * is hidden in the RACK code in tcp_recovery.c and tcp_xmit_retransmit_queue().
2695 * The first determines the moment _when_ we should reduce CWND and,
2696 * hence, slow down forward transmission. In fact, it determines the moment
2697 * when we decide that hole is caused by loss, rather than by a reorder.
2698 *
2699 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
2700 * holes, caused by lost packets.
2701 *
2702 * And the most logically complicated part of algorithm is undo
2703 * heuristics. We detect false retransmits due to both too early
2704 * fast retransmit (reordering) and underestimated RTO, analyzing
2705 * timestamps and D-SACKs. When we detect that some segments were
2706 * retransmitted by mistake and CWND reduction was wrong, we undo
2707 * window reduction and abort recovery phase. This logic is hidden
2708 * inside several functions named tcp_try_undo_<something>.
2709 */
2710
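/* Illustrative sketch (not part of the kernel, disabled): the accounting
 * above boils down to the two helpers sketched here with placeholder
 * parameters; the real ones are tcp_left_out() and tcp_packets_in_flight().
 */
#if 0
static unsigned int example_left_out(unsigned int sacked_out,
				     unsigned int lost_out)
{
	/* left_out = sacked_out + lost_out */
	return sacked_out + lost_out;
}

static unsigned int example_packets_in_flight(unsigned int packets_out,
					      unsigned int sacked_out,
					      unsigned int lost_out,
					      unsigned int retrans_out)
{
	/* in_flight = packets_out - left_out + retrans_out */
	return packets_out - (sacked_out + lost_out) + retrans_out;
}
#endif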
2711 /* This function decides when we should leave the Disordered state
2712 * and enter Recovery phase, reducing congestion window.
2713 *
2714 * Main question: may we further continue forward transmission
2715 * with the same cwnd?
2716 */
2717 static bool tcp_time_to_recover(const struct tcp_sock *tp)
2718 {
2719 /* Has loss detection marked at least one packet lost? */
2720 return tp->lost_out != 0;
2721 }
2722
2723 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2724 {
2725 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2726 before(tp->rx_opt.rcv_tsecr, when);
2727 }
2728
2729 /* skb is spurious retransmitted if the returned timestamp echo
2730 * reply is prior to the skb transmission time
2731 */
2732 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2733 const struct sk_buff *skb)
2734 {
2735 return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2736 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
2737 }
2738
2739 /* Nothing was retransmitted, or the returned timestamp is less
2740 * than the timestamp of the first retransmission.
2741 */
2742 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2743 {
2744 const struct sock *sk = (const struct sock *)tp;
2745
2746 /* Received an echoed timestamp before the first retransmission? */
2747 if (tp->retrans_stamp)
2748 return tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2749
2750 /* We set tp->retrans_stamp upon the first retransmission of a loss
2751 * recovery episode, so normally if tp->retrans_stamp is 0 then no
2752 * retransmission has happened yet (likely due to TSQ, which can cause
2753 * fast retransmits to be delayed). So if snd_una advanced while
2754 * tp->retrans_stamp is 0, then apparently a packet was merely delayed,
2755 * not lost. But there are exceptions where we retransmit but then
2756 * clear tp->retrans_stamp, so we check for those exceptions.
2757 */
2758
2759 /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen()
2760 * clears tp->retrans_stamp when snd_una == high_seq.
2761 */
2762 if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq))
2763 return false;
2764
2765 /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp
2766 * when FLAG_SYN_ACKED is set, even if the SYN was
2767 * retransmitted.
2768 */
2769 if (sk->sk_state == TCP_SYN_SENT)
2770 return false;
2771
2772 return true; /* tp->retrans_stamp is zero; no retransmit yet */
2773 }
2774
2775 /* Undo procedures. */
2776
2777 /* We can clear retrans_stamp when there are no retransmissions in the
2778 * window. It would seem that it is trivially available for us in
2779 * tp->retrans_out, however, that kind of assumption doesn't consider
2780 * what will happen if errors occur when sending a retransmission for the
2781 * second time. ...It could be that such a segment has only
2782 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2783 * the head skb is enough except for some reneging corner cases that
2784 * are not worth the effort.
2785 *
2786 * Main reason for all this complexity is the fact that connection dying
2787 * time now depends on the validity of the retrans_stamp, in particular,
2788 * that successive retransmissions of a segment must not advance
2789 * retrans_stamp under any conditions.
2790 */
2791 static bool tcp_any_retrans_done(const struct sock *sk)
2792 {
2793 const struct tcp_sock *tp = tcp_sk(sk);
2794 struct sk_buff *skb;
2795
2796 if (tp->retrans_out)
2797 return true;
2798
2799 skb = tcp_rtx_queue_head(sk);
2800 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2801 return true;
2802
2803 return false;
2804 }
2805
2806 /* If loss recovery is finished and there are no retransmits out in the
2807 * network, then we clear retrans_stamp so that upon the next loss recovery
2808 * retransmits_timed_out() and timestamp-undo are using the correct value.
2809 */
2810 static void tcp_retrans_stamp_cleanup(struct sock *sk)
2811 {
2812 if (!tcp_any_retrans_done(sk))
2813 tcp_sk(sk)->retrans_stamp = 0;
2814 }
2815
2816 static void DBGUNDO(struct sock *sk, const char *msg)
2817 {
2818 #if FASTRETRANS_DEBUG > 1
2819 struct tcp_sock *tp = tcp_sk(sk);
2820 struct inet_sock *inet = inet_sk(sk);
2821
2822 if (sk->sk_family == AF_INET) {
2823 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2824 msg,
2825 &inet->inet_daddr, ntohs(inet->inet_dport),
2826 tcp_snd_cwnd(tp), tcp_left_out(tp),
2827 tp->snd_ssthresh, tp->prior_ssthresh,
2828 tp->packets_out);
2829 }
2830 #if IS_ENABLED(CONFIG_IPV6)
2831 else if (sk->sk_family == AF_INET6) {
2832 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2833 msg,
2834 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2835 tcp_snd_cwnd(tp), tcp_left_out(tp),
2836 tp->snd_ssthresh, tp->prior_ssthresh,
2837 tp->packets_out);
2838 }
2839 #endif
2840 #endif
2841 }
2842
2843 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2844 {
2845 struct tcp_sock *tp = tcp_sk(sk);
2846
2847 if (unmark_loss) {
2848 struct sk_buff *skb;
2849
2850 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2851 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2852 }
2853 tp->lost_out = 0;
2854 tcp_clear_all_retrans_hints(tp);
2855 }
2856
2857 if (tp->prior_ssthresh) {
2858 const struct inet_connection_sock *icsk = inet_csk(sk);
2859
2860 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2861
2862 if (tp->prior_ssthresh > tp->snd_ssthresh) {
2863 WRITE_ONCE(tp->snd_ssthresh, tp->prior_ssthresh);
2864 tcp_ecn_withdraw_cwr(tp);
2865 }
2866 }
2867 tp->snd_cwnd_stamp = tcp_jiffies32;
2868 tp->undo_marker = 0;
2869 tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2870 }
2871
2872 static inline bool tcp_may_undo(const struct tcp_sock *tp)
2873 {
2874 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2875 }
2876
2877 static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2878 {
2879 struct tcp_sock *tp = tcp_sk(sk);
2880
2881 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2882 /* Hold old state until something *above* high_seq
2883 * is ACKed. For Reno it is a MUST to prevent false
2884 * fast retransmits (RFC2582). SACK TCP is safe. */
2885 if (!tcp_any_retrans_done(sk))
2886 tp->retrans_stamp = 0;
2887 return true;
2888 }
2889 return false;
2890 }
2891
2892 /* People celebrate: "We love our President!" */
2893 static bool tcp_try_undo_recovery(struct sock *sk)
2894 {
2895 struct tcp_sock *tp = tcp_sk(sk);
2896
2897 if (tcp_may_undo(tp)) {
2898 int mib_idx;
2899
2900 /* Happy end! We did not retransmit anything
2901 * or our original transmission succeeded.
2902 */
2903 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2904 tcp_undo_cwnd_reduction(sk, false);
2905 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2906 mib_idx = LINUX_MIB_TCPLOSSUNDO;
2907 else
2908 mib_idx = LINUX_MIB_TCPFULLUNDO;
2909
2910 NET_INC_STATS(sock_net(sk), mib_idx);
2911 } else if (tp->rack.reo_wnd_persist) {
2912 tp->rack.reo_wnd_persist--;
2913 }
2914 if (tcp_is_non_sack_preventing_reopen(sk))
2915 return true;
2916 tcp_set_ca_state(sk, TCP_CA_Open);
2917 tp->is_sack_reneg = 0;
2918 return false;
2919 }
2920
2921 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2922 static bool tcp_try_undo_dsack(struct sock *sk)
2923 {
2924 struct tcp_sock *tp = tcp_sk(sk);
2925
2926 if (tp->undo_marker && !tp->undo_retrans) {
2927 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH,
2928 tp->rack.reo_wnd_persist + 1);
2929 DBGUNDO(sk, "D-SACK");
2930 tcp_undo_cwnd_reduction(sk, false);
2931 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2932 return true;
2933 }
2934 return false;
2935 }
2936
2937 /* Undo during loss recovery after partial ACK or using F-RTO. */
2938 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2939 {
2940 struct tcp_sock *tp = tcp_sk(sk);
2941
2942 if (frto_undo || tcp_may_undo(tp)) {
2943 tcp_undo_cwnd_reduction(sk, true);
2944
2945 DBGUNDO(sk, "partial loss");
2946 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2947 if (frto_undo)
2948 NET_INC_STATS(sock_net(sk),
2949 LINUX_MIB_TCPSPURIOUSRTOS);
2950 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
2951 if (tcp_is_non_sack_preventing_reopen(sk))
2952 return true;
2953 if (frto_undo || tcp_is_sack(tp)) {
2954 tcp_set_ca_state(sk, TCP_CA_Open);
2955 tp->is_sack_reneg = 0;
2956 }
2957 return true;
2958 }
2959 return false;
2960 }
2961
2962 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
2963 * It computes the number of packets to send (sndcnt) based on packets newly
2964 * delivered:
2965 * 1) If the number of packets in flight is larger than ssthresh, PRR spreads the
2966 * cwnd reduction across a full RTT.
2967 * 2) Otherwise PRR uses packet conservation to send as much as delivered.
2968 * But when SND_UNA is acked without further losses,
2969 * it slow-starts cwnd up to ssthresh to speed up the recovery.
2970 */
2971 static void tcp_init_cwnd_reduction(struct sock *sk)
2972 {
2973 struct tcp_sock *tp = tcp_sk(sk);
2974
2975 tp->high_seq = tp->snd_nxt;
2976 tp->tlp_high_seq = 0;
2977 tp->snd_cwnd_cnt = 0;
2978 tp->prior_cwnd = tcp_snd_cwnd(tp);
2979 tp->prr_delivered = 0;
2980 tp->prr_out = 0;
2981 WRITE_ONCE(tp->snd_ssthresh, inet_csk(sk)->icsk_ca_ops->ssthresh(sk));
2982 tcp_ecn_queue_cwr(tp);
2983 }
2984
2985 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
2986 {
2987 struct tcp_sock *tp = tcp_sk(sk);
2988 int sndcnt = 0;
2989 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2990
2991 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
2992 return;
2993
2994 trace_tcp_cwnd_reduction_tp(sk, newly_acked_sacked, newly_lost, flag);
2995
2996 tp->prr_delivered += newly_acked_sacked;
2997 if (delta < 0) {
2998 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2999 tp->prior_cwnd - 1;
3000 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
3001 } else {
3002 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out,
3003 newly_acked_sacked);
3004 if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
3005 sndcnt++;
3006 sndcnt = min(delta, sndcnt);
3007 }
3008 /* Force a fast retransmit upon entering fast recovery */
3009 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
3010 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
3011 }
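/* Worked PRR example (illustrative): suppose prior_cwnd = 10, ssthresh = 5,
 * prr_out = 0, and an ACK newly delivers 2 packets while 8 remain in flight.
 * Then delta = 5 - 8 < 0, so the proportional branch applies:
 * sndcnt = ceil(5 * 2 / 10) - 0 = 1, i.e. roughly one packet is sent for
 * every two delivered, walking cwnd down from 10 toward 5 over about one
 * round trip as RFC 6937 intends.
 */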
3012
3013 static inline void tcp_end_cwnd_reduction(struct sock *sk)
3014 {
3015 struct tcp_sock *tp = tcp_sk(sk);
3016
3017 if (inet_csk(sk)->icsk_ca_ops->cong_control)
3018 return;
3019
3020 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
3021 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
3022 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
3023 tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
3024 tp->snd_cwnd_stamp = tcp_jiffies32;
3025 }
3026 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
3027 }
3028
3029 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
3030 void tcp_enter_cwr(struct sock *sk)
3031 {
3032 struct tcp_sock *tp = tcp_sk(sk);
3033
3034 tp->prior_ssthresh = 0;
3035 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3036 tp->undo_marker = 0;
3037 tcp_init_cwnd_reduction(sk);
3038 tcp_set_ca_state(sk, TCP_CA_CWR);
3039 }
3040 }
3041 EXPORT_SYMBOL(tcp_enter_cwr);
3042
3043 static void tcp_try_keep_open(struct sock *sk)
3044 {
3045 struct tcp_sock *tp = tcp_sk(sk);
3046 int state = TCP_CA_Open;
3047
3048 if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
3049 state = TCP_CA_Disorder;
3050
3051 if (inet_csk(sk)->icsk_ca_state != state) {
3052 tcp_set_ca_state(sk, state);
3053 tp->high_seq = tp->snd_nxt;
3054 }
3055 }
3056
3057 static void tcp_try_to_open(struct sock *sk, int flag)
3058 {
3059 struct tcp_sock *tp = tcp_sk(sk);
3060
3061 tcp_verify_left_out(tp);
3062
3063 if (!tcp_any_retrans_done(sk))
3064 tp->retrans_stamp = 0;
3065
3066 if (flag & FLAG_ECE)
3067 tcp_enter_cwr(sk);
3068
3069 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
3070 tcp_try_keep_open(sk);
3071 }
3072 }
3073
3074 static void tcp_mtup_probe_failed(struct sock *sk)
3075 {
3076 struct inet_connection_sock *icsk = inet_csk(sk);
3077
3078 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
3079 icsk->icsk_mtup.probe_size = 0;
3080 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
3081 }
3082
3083 static void tcp_mtup_probe_success(struct sock *sk)
3084 {
3085 struct tcp_sock *tp = tcp_sk(sk);
3086 struct inet_connection_sock *icsk = inet_csk(sk);
3087 u64 val;
3088
3089 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3090
3091 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
3092 do_div(val, icsk->icsk_mtup.probe_size);
3093 DEBUG_NET_WARN_ON_ONCE((u32)val != val);
3094 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
3095
3096 tp->snd_cwnd_cnt = 0;
3097 tp->snd_cwnd_stamp = tcp_jiffies32;
3098 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3099
3100 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
3101 icsk->icsk_mtup.probe_size = 0;
3102 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
3103 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
3104 }
3105
3106 /* Sometimes we deduce that packets have been dropped due to reasons other than
3107 * congestion, like path MTU reductions or failed client TFO attempts. In these
3108 * cases we call this function to retransmit as many packets as cwnd allows,
3109 * without reducing cwnd. Given that retransmits will set retrans_stamp to a
3110 * non-zero value (and may do so in a later calling context due to TSQ), we
3111 * also enter CA_Loss so that we track when all retransmitted packets are ACKed
3112 * and clear retrans_stamp when that happens (to ensure later recurring RTOs
3113 * are using the correct retrans_stamp and don't declare ETIMEDOUT
3114 * prematurely).
3115 */
3116 static void tcp_non_congestion_loss_retransmit(struct sock *sk)
3117 {
3118 const struct inet_connection_sock *icsk = inet_csk(sk);
3119 struct tcp_sock *tp = tcp_sk(sk);
3120
3121 if (icsk->icsk_ca_state != TCP_CA_Loss) {
3122 tp->high_seq = tp->snd_nxt;
3123 WRITE_ONCE(tp->snd_ssthresh, tcp_current_ssthresh(sk));
3124 tp->prior_ssthresh = 0;
3125 tp->undo_marker = 0;
3126 tcp_set_ca_state(sk, TCP_CA_Loss);
3127 }
3128 tcp_xmit_retransmit_queue(sk);
3129 }
3130
3131 /* Do a simple retransmit without using the backoff mechanisms in
3132 * tcp_timer. This is used for path mtu discovery.
3133 * The socket is already locked here.
3134 */
3135 void tcp_simple_retransmit(struct sock *sk)
3136 {
3137 struct tcp_sock *tp = tcp_sk(sk);
3138 struct sk_buff *skb;
3139 int mss;
3140
3141 /* A fastopen SYN request is stored as two separate packets within
3142 * the retransmit queue, this is done by tcp_send_syn_data().
3143 * As a result simply checking the MSS of the frames in the queue
3144 * will not work for the SYN packet.
3145 *
3146 * Our being here is an indication of a path MTU issue, so we can
3147 * assume that the fastopen SYN was lost and just mark all the
3148 * frames in the retransmit queue as lost. We will use an MSS of
3149 * -1 to mark all frames as lost, otherwise compute the current MSS.
3150 */
3151 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT)
3152 mss = -1;
3153 else
3154 mss = tcp_current_mss(sk);
3155
3156 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
3157 if (tcp_skb_seglen(skb) > mss)
3158 tcp_mark_skb_lost(sk, skb);
3159 }
3160
3161 if (!tp->lost_out)
3162 return;
3163
3164 if (tcp_is_reno(tp))
3165 tcp_limit_reno_sacked(tp);
3166
3167 tcp_verify_left_out(tp);
3168
3169 /* Don't muck with the congestion window here.
3170 * The reason is that we do not increase the amount of _data_
3171 * in the network, but the units have changed and the effective
3172 * cwnd/ssthresh are really reduced now.
3173 */
3174 tcp_non_congestion_loss_retransmit(sk);
3175 }
3176
3177 void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3178 {
3179 struct tcp_sock *tp = tcp_sk(sk);
3180 int mib_idx;
3181
3182 /* Start the clock with our fast retransmit, for undo and ETIMEDOUT. */
3183 tcp_retrans_stamp_cleanup(sk);
3184
3185 if (tcp_is_reno(tp))
3186 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3187 else
3188 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3189
3190 NET_INC_STATS(sock_net(sk), mib_idx);
3191
3192 tp->prior_ssthresh = 0;
3193 tcp_init_undo(tp);
3194
3195 if (!tcp_in_cwnd_reduction(sk)) {
3196 if (!ece_ack)
3197 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3198 tcp_init_cwnd_reduction(sk);
3199 }
3200 tcp_set_ca_state(sk, TCP_CA_Recovery);
3201 }
3202
3203 static void tcp_update_rto_time(struct tcp_sock *tp)
3204 {
3205 if (tp->rto_stamp) {
3206 tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp;
3207 tp->rto_stamp = 0;
3208 }
3209 }
3210
3211 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
3212 * recovered or spurious. Otherwise retransmits more on partial ACKs.
3213 */
3214 static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
3215 int *rexmit)
3216 {
3217 struct tcp_sock *tp = tcp_sk(sk);
3218 bool recovered = !before(tp->snd_una, tp->high_seq);
3219
3220 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
3221 tcp_try_undo_loss(sk, false))
3222 return;
3223
3224 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
3225 /* Step 3.b. A timeout is spurious if not all data are
3226 * lost, i.e., never-retransmitted data are (s)acked.
3227 */
3228 if ((flag & FLAG_ORIG_SACK_ACKED) &&
3229 tcp_try_undo_loss(sk, true))
3230 return;
3231
3232 if (after(tp->snd_nxt, tp->high_seq)) {
3233 if (flag & FLAG_DATA_SACKED || num_dupack)
3234 tp->frto = 0; /* Step 3.a. loss was real */
3235 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
3236 tp->high_seq = tp->snd_nxt;
3237 /* Step 2.b. Try to send new data (but deferred until cwnd
3238 * is updated in tcp_ack()). Otherwise fall back to
3239 * the conventional recovery.
3240 */
3241 if (!tcp_write_queue_empty(sk) &&
3242 after(tcp_wnd_end(tp), tp->snd_nxt)) {
3243 *rexmit = REXMIT_NEW;
3244 return;
3245 }
3246 tp->frto = 0;
3247 }
3248 }
3249
3250 if (recovered) {
3251 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
3252 tcp_try_undo_recovery(sk);
3253 return;
3254 }
3255 if (tcp_is_reno(tp)) {
3256 /* A Reno DUPACK means new data in F-RTO step 2.b above are
3257 * delivered. Lower inflight to clock out (re)transmissions.
3258 */
3259 if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
3260 tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
3261 else if (flag & FLAG_SND_UNA_ADVANCED)
3262 tcp_reset_reno_sack(tp);
3263 }
3264 *rexmit = REXMIT_LOST;
3265 }
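
/* Orientation sketch of the F-RTO (RFC 5682) outcomes handled above: if
 * never-retransmitted data gets (s)acked, the timeout is judged spurious and
 * the loss state is undone (step 3.b); if new data sent after the timeout is
 * SACKed or dup-acked, the loss is judged real and frto is cleared
 * (step 3.a); otherwise new data is sent to probe the path (step 2.b) before
 * falling back to conventional recovery.
 */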
3266
3267 /* Undo during fast recovery after partial ACK. */
3268 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
3269 {
3270 struct tcp_sock *tp = tcp_sk(sk);
3271
3272 if (tp->undo_marker && tcp_packet_delayed(tp)) {
3273 /* Plain luck! Hole is filled with a delayed
3274 * packet, rather than with a retransmit. Check reordering.
3275 */
3276 tcp_check_sack_reordering(sk, prior_snd_una, 1);
3277
3278 /* We are getting evidence that the reordering degree is higher
3279 * than we realized. If there are no retransmits out then we
3280 * can undo. Otherwise we clock out new packets but do not
3281 * mark more packets lost or retransmit more.
3282 */
3283 if (tp->retrans_out)
3284 return true;
3285
3286 if (!tcp_any_retrans_done(sk))
3287 tp->retrans_stamp = 0;
3288
3289 DBGUNDO(sk, "partial recovery");
3290 tcp_undo_cwnd_reduction(sk, true);
3291 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
3292 tcp_try_keep_open(sk);
3293 }
3294 return false;
3295 }
3296
3297 static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
3298 {
3299 struct tcp_sock *tp = tcp_sk(sk);
3300
3301 if (tcp_rtx_queue_empty(sk))
3302 return;
3303
3304 if (unlikely(tcp_is_reno(tp))) {
3305 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
3306 } else {
3307 u32 prior_retrans = tp->retrans_out;
3308
3309 if (tcp_rack_mark_lost(sk))
3310 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
3311 if (prior_retrans > tp->retrans_out)
3312 *ack_flag |= FLAG_LOST_RETRANS;
3313 }
3314 }
3315
3316 /* Process an event which can non-trivially update packets-in-flight.
3317 * Main goal of this function is to calculate new estimate for left_out,
3318 * taking into account both packets sitting in receiver's buffer and
3319 * packets lost by network.
3320 *
3321 * Besides that it updates the congestion state when packet loss or ECN
3322 * is detected. But it does not reduce the cwnd; that is done by the
3323 * congestion control later.
3324 *
3325 * It does _not_ decide what to send; that is done in
3326 * tcp_xmit_retransmit_queue().
3327 */
3328 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
3329 int num_dupack, int *ack_flag, int *rexmit)
3330 {
3331 struct inet_connection_sock *icsk = inet_csk(sk);
3332 struct tcp_sock *tp = tcp_sk(sk);
3333 int flag = *ack_flag;
3334 bool ece_ack = flag & FLAG_ECE;
3335
3336 if (!tp->packets_out && tp->sacked_out)
3337 tp->sacked_out = 0;
3338
3339 /* Now state machine starts.
3340 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
3341 if (ece_ack)
3342 tp->prior_ssthresh = 0;
3343
3344 /* B. In all the states check for reneging SACKs. */
3345 if (tcp_check_sack_reneging(sk, ack_flag))
3346 return;
3347
3348 /* C. Check consistency of the current state. */
3349 tcp_verify_left_out(tp);
3350
3351 /* D. Check state exit conditions. State can be terminated
3352 * when high_seq is ACKed. */
3353 if (icsk->icsk_ca_state == TCP_CA_Open) {
3354 WARN_ON(tp->retrans_out != 0 && !tp->syn_data);
3355 tp->retrans_stamp = 0;
3356 } else if (!before(tp->snd_una, tp->high_seq)) {
3357 switch (icsk->icsk_ca_state) {
3358 case TCP_CA_CWR:
3359 /* CWR is to be held until something *above* high_seq
3360 * is ACKed, for the CWR bit to reach the receiver. */
3361 if (tp->snd_una != tp->high_seq) {
3362 tcp_end_cwnd_reduction(sk);
3363 tcp_set_ca_state(sk, TCP_CA_Open);
3364 }
3365 break;
3366
3367 case TCP_CA_Recovery:
3368 if (tcp_is_reno(tp))
3369 tcp_reset_reno_sack(tp);
3370 if (tcp_try_undo_recovery(sk))
3371 return;
3372 tcp_end_cwnd_reduction(sk);
3373 break;
3374 }
3375 }
3376
3377 /* E. Process state. */
3378 switch (icsk->icsk_ca_state) {
3379 case TCP_CA_Recovery:
3380 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3381 if (tcp_is_reno(tp))
3382 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3383 } else if (tcp_try_undo_partial(sk, prior_snd_una))
3384 return;
3385
3386 if (tcp_try_undo_dsack(sk))
3387 tcp_try_to_open(sk, flag);
3388
3389 tcp_identify_packet_loss(sk, ack_flag);
3390 if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3391 if (!tcp_time_to_recover(tp))
3392 return;
3393 /* An undo has reverted the recovery state. If loss is evident,
3394 * start a new recovery (e.g. reordering followed by loss).
3395 */
3396 tcp_enter_recovery(sk, ece_ack);
3397 }
3398 break;
3399 case TCP_CA_Loss:
3400 tcp_process_loss(sk, flag, num_dupack, rexmit);
3401 if (icsk->icsk_ca_state != TCP_CA_Loss)
3402 tcp_update_rto_time(tp);
3403 tcp_identify_packet_loss(sk, ack_flag);
3404 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3405 (*ack_flag & FLAG_LOST_RETRANS)))
3406 return;
3407 /* Change state if cwnd is undone or retransmits are lost */
3408 fallthrough;
3409 default:
3410 if (tcp_is_reno(tp)) {
3411 if (flag & FLAG_SND_UNA_ADVANCED)
3412 tcp_reset_reno_sack(tp);
3413 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3414 }
3415
3416 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3417 tcp_try_undo_dsack(sk);
3418
3419 tcp_identify_packet_loss(sk, ack_flag);
3420 if (!tcp_time_to_recover(tp)) {
3421 tcp_try_to_open(sk, flag);
3422 return;
3423 }
3424
3425 /* MTU probe failure: don't reduce cwnd */
3426 if (icsk->icsk_ca_state < TCP_CA_CWR &&
3427 icsk->icsk_mtup.probe_size &&
3428 tp->snd_una == tp->mtu_probe.probe_seq_start) {
3429 tcp_mtup_probe_failed(sk);
3430 /* Restores the reduction we did in tcp_mtup_probe() */
3431 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
3432 tcp_simple_retransmit(sk);
3433 return;
3434 }
3435
3436 /* Otherwise enter Recovery state */
3437 tcp_enter_recovery(sk, ece_ack);
3438 }
3439
3440 *rexmit = REXMIT_LOST;
3441 }
3442
3443 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3444 {
3445 u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
3446 struct tcp_sock *tp = tcp_sk(sk);
3447
3448 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3449 /* If the remote keeps returning delayed ACKs, eventually
3450 * the min filter would pick it up and overestimate the
3451 * prop. delay when it expires. Skip suspected delayed ACKs.
3452 */
3453 return;
3454 }
3455 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
3456 rtt_us ? : jiffies_to_usecs(1));
3457 }
3458
3459 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3460 long seq_rtt_us, long sack_rtt_us,
3461 long ca_rtt_us, struct rate_sample *rs)
3462 {
3463 const struct tcp_sock *tp = tcp_sk(sk);
3464
3465 /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
3466 * broken middle-boxes or peers may corrupt TS-ECR fields. But
3467 * Karn's algorithm forbids taking RTT if some retransmitted data
3468 * is acked (RFC6298).
3469 */
3470 if (seq_rtt_us < 0)
3471 seq_rtt_us = sack_rtt_us;
3472
3473 /* RTTM Rule: A TSecr value received in a segment is used to
3474 * update the averaged RTT measurement only if the segment
3475 * acknowledges some new data, i.e., only if it advances the
3476 * left edge of the send window.
3477 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3478 */
3479 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp &&
3480 tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED)
3481 seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp, 1);
3482
3483 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
3484 if (seq_rtt_us < 0)
3485 return false;
3486
3487 /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
3488 * always taken together with ACK, SACK, or TS-opts. Any negative
3489 * values will be skipped with the seq_rtt_us < 0 check above.
3490 */
3491 tcp_update_rtt_min(sk, ca_rtt_us, flag);
3492 tcp_rtt_estimator(sk, seq_rtt_us);
3493 tcp_set_rto(sk);
3494
3495 /* RFC6298: only reset backoff on valid RTT measurement. */
3496 inet_csk(sk)->icsk_backoff = 0;
3497 return true;
3498 }
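
/* For reference, the helpers called above implement the usual RFC 6298
 * estimators, roughly:
 *   SRTT   <- 7/8 * SRTT + 1/8 * R'
 *   RTTVAR <- 3/4 * RTTVAR + 1/4 * |SRTT - R'|
 *   RTO    ~= SRTT + 4 * RTTVAR, clamped to the configured min/max.
 * The exact arithmetic lives in tcp_rtt_estimator() and tcp_set_rto().
 */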
3499
3500 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
3501 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
3502 {
3503 struct rate_sample rs;
3504 long rtt_us = -1L;
3505
3506 if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
3507 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
3508
3509 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
3510 }
3511
3512
3513 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3514 {
3515 const struct inet_connection_sock *icsk = inet_csk(sk);
3516
3517 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3518 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
3519 }
3520
3521 /* Restart timer after forward progress on connection.
3522 * RFC2988 recommends to restart timer to now+rto.
3523 */
3524 void tcp_rearm_rto(struct sock *sk)
3525 {
3526 const struct inet_connection_sock *icsk = inet_csk(sk);
3527 struct tcp_sock *tp = tcp_sk(sk);
3528
3529 /* If the retrans timer is currently being used by Fast Open
3530 * for SYN-ACK retrans purpose, stay put.
3531 */
3532 if (rcu_access_pointer(tp->fastopen_rsk))
3533 return;
3534
3535 if (!tp->packets_out) {
3536 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3537 } else {
3538 u32 rto = inet_csk(sk)->icsk_rto;
3539 /* Offset the time elapsed after installing regular RTO */
3540 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3541 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3542 s64 delta_us = tcp_rto_delta_us(sk);
3543 /* delta_us may not be positive if the socket is locked
3544 * when the retrans timer fires and is rescheduled.
3545 */
3546 rto = usecs_to_jiffies(max_t(int, delta_us, 1));
3547 }
3548 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true);
3549 }
3550 }
3551
3552 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3553 static void tcp_set_xmit_timer(struct sock *sk)
3554 {
3555 if (!tcp_sk(sk)->packets_out || !tcp_schedule_loss_probe(sk, true))
3556 tcp_rearm_rto(sk);
3557 }
3558
3559 /* If we get here, the whole TSO packet has not been acked. */
3560 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3561 {
3562 struct tcp_sock *tp = tcp_sk(sk);
3563 u32 packets_acked;
3564
3565 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3566
3567 packets_acked = tcp_skb_pcount(skb);
3568 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3569 return 0;
3570 packets_acked -= tcp_skb_pcount(skb);
3571
3572 if (packets_acked) {
3573 BUG_ON(tcp_skb_pcount(skb) == 0);
3574 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3575 }
3576
3577 return packets_acked;
3578 }
3579
3580 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3581 const struct sk_buff *ack_skb, u32 prior_snd_una)
3582 {
3583 const struct skb_shared_info *shinfo;
3584
3585 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3586 if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3587 return;
3588
3589 shinfo = skb_shinfo(skb);
3590 if (!before(shinfo->tskey, prior_snd_una) &&
3591 before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3592 tcp_skb_tsorted_save(skb) {
3593 __skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
3594 } tcp_skb_tsorted_restore(skb);
3595 }
3596 }
3597
3598 /* Remove acknowledged frames from the retransmission queue. If our packet
3599 * is before the ack sequence we can discard it as it's confirmed to have
3600 * arrived at the other end.
3601 */
3602 static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
3603 u32 prior_fack, u32 prior_snd_una,
3604 struct tcp_sacktag_state *sack, bool ece_ack)
3605 {
3606 const struct inet_connection_sock *icsk = inet_csk(sk);
3607 u64 first_ackt, last_ackt;
3608 struct tcp_sock *tp = tcp_sk(sk);
3609 u32 prior_sacked = tp->sacked_out;
3610 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3611 struct sk_buff *skb, *next;
3612 bool fully_acked = true;
3613 long sack_rtt_us = -1L;
3614 long seq_rtt_us = -1L;
3615 long ca_rtt_us = -1L;
3616 u32 pkts_acked = 0;
3617 bool rtt_update;
3618 int flag = 0;
3619
3620 first_ackt = 0;
3621
3622 for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3623 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3624 const u32 start_seq = scb->seq;
3625 u8 sacked = scb->sacked;
3626 u32 acked_pcount;
3627
3628 /* Determine how many packets and how many bytes were acked, TSO or otherwise */
3629 if (after(scb->end_seq, tp->snd_una)) {
3630 if (tcp_skb_pcount(skb) == 1 ||
3631 !after(tp->snd_una, scb->seq))
3632 break;
3633
3634 acked_pcount = tcp_tso_acked(sk, skb);
3635 if (!acked_pcount)
3636 break;
3637 fully_acked = false;
3638 } else {
3639 acked_pcount = tcp_skb_pcount(skb);
3640 }
3641
3642 if (unlikely(sacked & TCPCB_RETRANS)) {
3643 if (sacked & TCPCB_SACKED_RETRANS)
3644 tp->retrans_out -= acked_pcount;
3645 flag |= FLAG_RETRANS_DATA_ACKED;
3646 } else if (!(sacked & TCPCB_SACKED_ACKED)) {
3647 last_ackt = tcp_skb_timestamp_us(skb);
3648 WARN_ON_ONCE(last_ackt == 0);
3649 if (!first_ackt)
3650 first_ackt = last_ackt;
3651
3652 if (before(start_seq, reord))
3653 reord = start_seq;
3654 if (!after(scb->end_seq, tp->high_seq))
3655 flag |= FLAG_ORIG_SACK_ACKED;
3656 }
3657
3658 if (sacked & TCPCB_SACKED_ACKED) {
3659 tp->sacked_out -= acked_pcount;
3660 /* snd_una delta covers these skbs */
3661 sack->delivered_bytes -= skb->len;
3662 } else if (tcp_is_sack(tp)) {
3663 tcp_count_delivered(tp, acked_pcount, ece_ack);
3664 if (!tcp_skb_spurious_retrans(tp, skb))
3665 tcp_rack_advance(tp, sacked, scb->end_seq,
3666 tcp_skb_timestamp_us(skb));
3667 }
3668 if (sacked & TCPCB_LOST)
3669 tp->lost_out -= acked_pcount;
3670
3671 tp->packets_out -= acked_pcount;
3672 pkts_acked += acked_pcount;
3673 tcp_rate_skb_delivered(sk, skb, sack->rate);
3674
3675 /* Initial outgoing SYN's get put onto the write_queue
3676 * just like anything else we transmit. It is not
3677 * true data, and if we misinform our callers that
3678 * this ACK acks real data, we will erroneously exit
3679 * connection startup slow start one packet too
3680 * quickly. This is severely frowned upon behavior.
3681 */
3682 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3683 flag |= FLAG_DATA_ACKED;
3684 } else {
3685 flag |= FLAG_SYN_ACKED;
3686 tp->retrans_stamp = 0;
3687 }
3688
3689 if (!fully_acked)
3690 break;
3691
3692 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3693
3694 next = skb_rb_next(skb);
3695 if (unlikely(skb == tp->retransmit_skb_hint))
3696 tp->retransmit_skb_hint = NULL;
3697 tcp_highest_sack_replace(sk, skb, next);
3698 tcp_rtx_queue_unlink_and_free(skb, sk);
3699 }
3700
3701 if (!skb)
3702 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3703
3704 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3705 tp->snd_up = tp->snd_una;
3706
3707 if (skb) {
3708 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3709 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3710 flag |= FLAG_SACK_RENEGING;
3711 }
3712
3713 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3714 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
3715 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
3716
3717 if (pkts_acked == 1 && fully_acked && !prior_sacked &&
3718 (tp->snd_una - prior_snd_una) < tp->mss_cache &&
3719 sack->rate->prior_delivered + 1 == tp->delivered &&
3720 !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3721 /* Conservatively mark a delayed ACK. It's typically
3722 * from a lone runt packet over the round trip to
3723 * a receiver w/o out-of-order or CE events.
3724 */
3725 flag |= FLAG_ACK_MAYBE_DELAYED;
3726 }
3727 }
3728 if (sack->first_sackt) {
3729 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
3730 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
3731 }
3732 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3733 ca_rtt_us, sack->rate);
3734
3735 if (flag & FLAG_ACKED) {
3736 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3737 if (unlikely(icsk->icsk_mtup.probe_size &&
3738 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3739 tcp_mtup_probe_success(sk);
3740 }
3741
3742 if (tcp_is_reno(tp)) {
3743 tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
3744
3745 /* If any of the cumulatively ACKed segments was
3746 * retransmitted, non-SACK case cannot confirm that
3747 * progress was due to original transmission due to
3748 * lack of TCPCB_SACKED_ACKED bits even if some of
3749 * the packets may have been never retransmitted.
3750 */
3751 if (flag & FLAG_RETRANS_DATA_ACKED)
3752 flag &= ~FLAG_ORIG_SACK_ACKED;
3753 } else {
3754 /* Non-retransmitted hole got filled? That's reordering */
3755 if (before(reord, prior_fack))
3756 tcp_check_sack_reordering(sk, reord, 0);
3757 }
3758
3759 sack->delivered_bytes = (skb ?
3760 TCP_SKB_CB(skb)->seq : tp->snd_una) -
3761 prior_snd_una;
3762 } else if (skb && rtt_update && sack_rtt_us >= 0 &&
3763 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3764 tcp_skb_timestamp_us(skb))) {
3765 /* Do not re-arm RTO if the sack RTT is measured from data sent
3766 * after the head was last (re)transmitted. Otherwise the
3767 * timeout may continue to extend in loss recovery.
3768 */
3769 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3770 }
3771
3772 if (icsk->icsk_ca_ops->pkts_acked) {
3773 struct ack_sample sample = { .pkts_acked = pkts_acked,
3774 .rtt_us = sack->rate->rtt_us };
3775
3776 sample.in_flight = tp->mss_cache *
3777 (tp->delivered - sack->rate->prior_delivered);
3778 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3779 }
3780
3781 #if FASTRETRANS_DEBUG > 0
3782 WARN_ON((int)tp->sacked_out < 0);
3783 WARN_ON((int)tp->lost_out < 0);
3784 WARN_ON((int)tp->retrans_out < 0);
3785 if (!tp->packets_out && tcp_is_sack(tp)) {
3786 icsk = inet_csk(sk);
3787 if (tp->lost_out) {
3788 pr_debug("Leak l=%u %d\n",
3789 tp->lost_out, icsk->icsk_ca_state);
3790 tp->lost_out = 0;
3791 }
3792 if (tp->sacked_out) {
3793 pr_debug("Leak s=%u %d\n",
3794 tp->sacked_out, icsk->icsk_ca_state);
3795 tp->sacked_out = 0;
3796 }
3797 if (tp->retrans_out) {
3798 pr_debug("Leak r=%u %d\n",
3799 tp->retrans_out, icsk->icsk_ca_state);
3800 tp->retrans_out = 0;
3801 }
3802 }
3803 #endif
3804 return flag;
3805 }
3806
3807 static void tcp_ack_probe(struct sock *sk)
3808 {
3809 struct inet_connection_sock *icsk = inet_csk(sk);
3810 struct sk_buff *head = tcp_send_head(sk);
3811 const struct tcp_sock *tp = tcp_sk(sk);
3812
3813 /* Was a usable window opened? */
3814 if (!head)
3815 return;
3816 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3817 icsk->icsk_backoff = 0;
3818 icsk->icsk_probes_tstamp = 0;
3819 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3820 /* Socket must be woken up by a subsequent tcp_data_snd_check().
3821 * This function is not for random use!
3822 */
3823 } else {
3824 unsigned long when = tcp_probe0_when(sk, tcp_rto_max(sk));
3825
3826 when = tcp_clamp_probe0_to_user_timeout(sk, when);
3827 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true);
3828 }
3829 }
3830
3831 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3832 {
3833 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3834 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3835 }
3836
3837 /* Decide whether to run the increase function of congestion control. */
3838 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3839 {
3840 /* If reordering is high then always grow cwnd whenever data is
3841 * delivered regardless of its ordering. Otherwise stay conservative
3842 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3843 * new SACK or ECE mark may first advance cwnd here and later reduce
3844 * cwnd in tcp_fastretrans_alert() based on more states.
3845 */
3846 if (tcp_sk(sk)->reordering >
3847 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
3848 return flag & FLAG_FORWARD_PROGRESS;
3849
3850 return flag & FLAG_DATA_ACKED;
3851 }
3852
3853 /* The "ultimate" congestion control function that aims to replace the rigid
3854 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
3855 * It's called toward the end of processing an ACK with precise rate
3856 * information. All transmissions or retransmissions are delayed until afterwards.
3857 */
3858 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
3859 int flag, const struct rate_sample *rs)
3860 {
3861 const struct inet_connection_sock *icsk = inet_csk(sk);
3862
3863 if (icsk->icsk_ca_ops->cong_control) {
3864 icsk->icsk_ca_ops->cong_control(sk, ack, flag, rs);
3865 return;
3866 }
3867
3868 if (tcp_in_cwnd_reduction(sk)) {
3869 /* Reduce cwnd if state mandates */
3870 tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
3871 } else if (tcp_may_raise_cwnd(sk, flag)) {
3872 /* Advance cwnd if state allows */
3873 tcp_cong_avoid(sk, ack, acked_sacked);
3874 }
3875 tcp_update_pacing_rate(sk);
3876 }
3877
3878 /* Check that window update is acceptable.
3879 * The function assumes that snd_una<=ack<=snd_next.
3880 */
3881 static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3882 const u32 ack, const u32 ack_seq,
3883 const u32 nwin)
3884 {
3885 return after(ack, tp->snd_una) ||
3886 after(ack_seq, tp->snd_wl1) ||
3887 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
3888 }
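
/* This is the RFC 793 SND.WL1/SND.WL2 test: accept a window update when the
 * ACK acknowledges new data, when it carries a newer sequence number than
 * the one that last updated the window, or when it carries the same
 * sequence number but a larger (or zero) window.
 */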
3889
3890 static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
3891 {
3892 #ifdef CONFIG_TCP_AO
3893 struct tcp_ao_info *ao;
3894
3895 if (!static_branch_unlikely(&tcp_ao_needed.key))
3896 return;
3897
3898 ao = rcu_dereference_protected(tp->ao_info,
3899 lockdep_sock_is_held((struct sock *)tp));
3900 if (ao && ack < tp->snd_una) {
3901 ao->snd_sne++;
3902 trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne);
3903 }
3904 #endif
3905 }
3906
3907 /* If we update tp->snd_una, also update tp->bytes_acked */
3908 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
3909 {
3910 u32 delta = ack - tp->snd_una;
3911
3912 sock_owned_by_me((struct sock *)tp);
3913 tp->bytes_acked += delta;
3914 tcp_snd_sne_update(tp, ack);
3915 WRITE_ONCE(tp->snd_una, ack);
3916 }
3917
3918 static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
3919 {
3920 #ifdef CONFIG_TCP_AO
3921 struct tcp_ao_info *ao;
3922
3923 if (!static_branch_unlikely(&tcp_ao_needed.key))
3924 return;
3925
3926 ao = rcu_dereference_protected(tp->ao_info,
3927 lockdep_sock_is_held((struct sock *)tp));
3928 if (ao && seq < tp->rcv_nxt) {
3929 ao->rcv_sne++;
3930 trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne);
3931 }
3932 #endif
3933 }
3934
3935 /* If we update tp->rcv_nxt, also update tp->bytes_received */
3936 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
3937 {
3938 u32 delta = seq - tp->rcv_nxt;
3939
3940 sock_owned_by_me((struct sock *)tp);
3941 tp->bytes_received += delta;
3942 tcp_rcv_sne_update(tp, seq);
3943 WRITE_ONCE(tp->rcv_nxt, seq);
3944 }
3945
3946 /* Update our send window.
3947 *
3948 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3949 * and in FreeBSD; NetBSD's is even worse) is wrong.
3950 */
3951 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3952 u32 ack_seq)
3953 {
3954 struct tcp_sock *tp = tcp_sk(sk);
3955 int flag = 0;
3956 u32 nwin = ntohs(tcp_hdr(skb)->window);
3957
3958 if (likely(!tcp_hdr(skb)->syn))
3959 nwin <<= tp->rx_opt.snd_wscale;
3960
3961 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3962 flag |= FLAG_WIN_UPDATE;
3963 tcp_update_wl(tp, ack_seq);
3964
3965 if (tp->snd_wnd != nwin) {
3966 tp->snd_wnd = nwin;
3967
3968 /* Note: this is the only place where
3969 * the fast path is recovered for sending TCP.
3970 */
3971 tp->pred_flags = 0;
3972 tcp_fast_path_check(sk);
3973
3974 if (!tcp_write_queue_empty(sk))
3975 tcp_slow_start_after_idle_check(sk);
3976
3977 if (nwin > tp->max_window) {
3978 tp->max_window = nwin;
3979 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3980 }
3981 }
3982 }
3983
3984 tcp_snd_una_update(tp, ack);
3985
3986 return flag;
3987 }
3988
3989 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3990 u32 *last_oow_ack_time)
3991 {
3992 /* Paired with the WRITE_ONCE() in this function. */
3993 u32 val = READ_ONCE(*last_oow_ack_time);
3994
3995 if (val) {
3996 s32 elapsed = (s32)(tcp_jiffies32 - val);
3997
3998 if (0 <= elapsed &&
3999 elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
4000 NET_INC_STATS(net, mib_idx);
4001 return true; /* rate-limited: don't send yet! */
4002 }
4003 }
4004
4005 /* Paired with the prior READ_ONCE() and with itself,
4006 * as we might be lockless.
4007 */
4008 WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
4009
4010 return false; /* not rate-limited: go ahead, send dupack now! */
4011 }
4012
4013 /* Return true if we're currently rate-limiting out-of-window ACKs and
4014 * thus shouldn't send a dupack right now. We rate-limit dupacks in
4015 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
4016 * attacks that send repeated SYNs or ACKs for the same connection. To
4017 * do this, we do not send a duplicate SYNACK or ACK if the remote
4018 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
4019 */
4020 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
4021 int mib_idx, u32 *last_oow_ack_time)
4022 {
4023 /* Data packets without SYNs are not likely part of an ACK loop. */
4024 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
4025 !tcp_hdr(skb)->syn)
4026 return false;
4027
4028 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
4029 }
4030
4031 static void tcp_send_ack_reflect_ect(struct sock *sk, bool accecn_reflector)
4032 {
4033 struct tcp_sock *tp = tcp_sk(sk);
4034 u16 flags = 0;
4035
4036 if (accecn_reflector)
4037 flags = tcp_accecn_reflector_flags(tp->syn_ect_rcv);
4038 __tcp_send_ack(sk, tp->rcv_nxt, flags);
4039 }
4040
4041 /* RFC 5961 7 [ACK Throttling] */
4042 static void tcp_send_challenge_ack(struct sock *sk, bool accecn_reflector)
4043 {
4044 struct tcp_sock *tp = tcp_sk(sk);
4045 struct net *net = sock_net(sk);
4046 u32 count, now, ack_limit;
4047
4048 /* First check our per-socket dupack rate limit. */
4049 if (__tcp_oow_rate_limited(net,
4050 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
4051 &tp->last_oow_ack_time))
4052 return;
4053
4054 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
4055 if (ack_limit == INT_MAX)
4056 goto send_ack;
4057
4058 /* Then check host-wide RFC 5961 rate limit. */
4059 now = jiffies / HZ;
4060 if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
4061 u32 half = (ack_limit + 1) >> 1;
4062
4063 WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
4064 WRITE_ONCE(net->ipv4.tcp_challenge_count,
4065 get_random_u32_inclusive(half, ack_limit + half - 1));
4066 }
4067 count = READ_ONCE(net->ipv4.tcp_challenge_count);
4068 if (count > 0) {
4069 WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
4070 send_ack:
4071 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
4072 tcp_send_ack_reflect_ect(sk, accecn_reflector);
4073 }
4074 }
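
/* Worked example (hypothetical sysctl value): if tcp_challenge_ack_limit
 * were set to 1000, then half = 500 and each new second the host-wide
 * budget is re-armed to a random value in [500, 1499]; every challenge ACK
 * decrements it, and once it hits zero further challenge ACKs are
 * suppressed until the next second. The randomization keeps the remaining
 * budget unguessable to an off-path attacker.
 */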
4075
4076 static void tcp_store_ts_recent(struct tcp_sock *tp)
4077 {
4078 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
4079 tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
4080 }
4081
4082 static int __tcp_replace_ts_recent(struct tcp_sock *tp, s32 tstamp_delta)
4083 {
4084 tcp_store_ts_recent(tp);
4085 return tstamp_delta > 0 ? FLAG_TS_PROGRESS : 0;
4086 }
4087
4088 static int tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
4089 {
4090 s32 delta;
4091
4092 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
4093 /* PAWS bug workaround wrt. ACK frames, the PAWS discard
4094 * extra check below makes sure this can only happen
4095 * for pure ACK frames. -DaveM
4096 *
4097 * Not only that, it also occurs for expired timestamps.
4098 */
4099
4100 if (tcp_paws_check(&tp->rx_opt, 0)) {
4101 delta = tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent;
4102 return __tcp_replace_ts_recent(tp, delta);
4103 }
4104 }
4105
4106 return 0;
4107 }
4108
4109 /* This routine deals with acks during a TLP episode and ends an episode by
4110 * resetting tlp_high_seq. Ref: TLP algorithm in RFC8985
4111 */
4112 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
4113 {
4114 struct tcp_sock *tp = tcp_sk(sk);
4115
4116 if (before(ack, tp->tlp_high_seq))
4117 return;
4118
4119 if (!tp->tlp_retrans) {
4120 /* TLP of new data has been acknowledged */
4121 tp->tlp_high_seq = 0;
4122 } else if (flag & FLAG_DSACK_TLP) {
4123 /* This DSACK means original and TLP probe arrived; no loss */
4124 tp->tlp_high_seq = 0;
4125 } else if (after(ack, tp->tlp_high_seq)) {
4126 /* ACK advances: there was a loss, so reduce cwnd. Reset
4127 * tlp_high_seq in tcp_init_cwnd_reduction()
4128 */
4129 tcp_init_cwnd_reduction(sk);
4130 tcp_set_ca_state(sk, TCP_CA_CWR);
4131 tcp_end_cwnd_reduction(sk);
4132 tcp_try_keep_open(sk);
4133 NET_INC_STATS(sock_net(sk),
4134 LINUX_MIB_TCPLOSSPROBERECOVERY);
4135 } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
4136 FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
4137 /* Pure dupack: original and TLP probe arrived; no loss */
4138 tp->tlp_high_seq = 0;
4139 }
4140 }
4141
4142 static void tcp_in_ack_event(struct sock *sk, int flag)
4143 {
4144 const struct inet_connection_sock *icsk = inet_csk(sk);
4145
4146 if (icsk->icsk_ca_ops->in_ack_event) {
4147 u32 ack_ev_flags = 0;
4148
4149 if (flag & FLAG_WIN_UPDATE)
4150 ack_ev_flags |= CA_ACK_WIN_UPDATE;
4151 if (flag & FLAG_SLOWPATH) {
4152 ack_ev_flags |= CA_ACK_SLOWPATH;
4153 if (flag & FLAG_ECE)
4154 ack_ev_flags |= CA_ACK_ECE;
4155 }
4156
4157 icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
4158 }
4159 }
4160
4161 /* Congestion control has updated the cwnd already. So if we're in
4162 * loss recovery then now we do any new sends (for FRTO) or
4163 * retransmits (for CA_Loss or CA_Recovery) that make sense.
4164 */
4165 static void tcp_xmit_recovery(struct sock *sk, int rexmit)
4166 {
4167 struct tcp_sock *tp = tcp_sk(sk);
4168
4169 if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
4170 return;
4171
4172 if (unlikely(rexmit == REXMIT_NEW)) {
4173 __tcp_push_pending_frames(sk, tcp_current_mss(sk),
4174 TCP_NAGLE_OFF);
4175 if (after(tp->snd_nxt, tp->high_seq))
4176 return;
4177 tp->frto = 0;
4178 }
4179 tcp_xmit_retransmit_queue(sk);
4180 }
4181
4182 /* Returns the number of packets newly acked or sacked by the current ACK */
4183 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered,
4184 u32 ecn_count, int flag)
4185 {
4186 const struct net *net = sock_net(sk);
4187 struct tcp_sock *tp = tcp_sk(sk);
4188 u32 delivered;
4189
4190 delivered = tp->delivered - prior_delivered;
4191 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
4192
4193 if (flag & FLAG_ECE) {
4194 if (tcp_ecn_mode_rfc3168(tp))
4195 ecn_count = delivered;
4196 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, ecn_count);
4197 }
4198
4199 return delivered;
4200 }
4201
4202 /* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
4203 *
4204 * If a DSACK is received that seems like it may have been due to reordering
4205 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
4206 * by srtt), since there is possibility that spurious retransmission was
4207 * due to reordering delay longer than reo_wnd.
4208 *
4209 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
4210 * no. of successful recoveries (accounts for full DSACK-based loss
4211 * recovery undo). After that, reset it to default (min_rtt/4).
4212 *
4213 * At most, reo_wnd is incremented only once per RTT, so that the new
4214 * DSACK we are reacting to is (approximately) due to a spurious retx
4215 * sent after the last reo_wnd update.
4216 *
4217 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than
4218 * absolute value to account for change in rtt.
4219 */
4220 static void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
4221 {
4222 struct tcp_sock *tp = tcp_sk(sk);
4223
4224 if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
4225 TCP_RACK_STATIC_REO_WND) ||
4226 !rs->prior_delivered)
4227 return;
4228
4229 /* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
4230 if (before(rs->prior_delivered, tp->rack.last_delivered))
4231 tp->rack.dsack_seen = 0;
4232
4233 /* Adjust the reo_wnd if update is pending */
4234 if (tp->rack.dsack_seen) {
4235 tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
4236 tp->rack.reo_wnd_steps + 1);
4237 tp->rack.dsack_seen = 0;
4238 tp->rack.last_delivered = tp->delivered;
4239 tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
4240 } else if (!tp->rack.reo_wnd_persist) {
4241 tp->rack.reo_wnd_steps = 1;
4242 }
4243 }
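
/* Illustration (hypothetical numbers): with a min_rtt of 20 ms the base
 * reordering window is one step of min_rtt/4 = 5 ms. A DSACK passing the
 * checks above bumps reo_wnd_steps to 2 (10 ms), and that larger window
 * persists for up to TCP_RACK_RECOVERY_THRESH successful recoveries before
 * decaying back toward the single-step default.
 */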
4244
4245 /* This routine deals with incoming acks, but not outgoing ones. */
4246 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
4247 {
4248 struct inet_connection_sock *icsk = inet_csk(sk);
4249 struct tcp_sock *tp = tcp_sk(sk);
4250 struct tcp_sacktag_state sack_state;
4251 struct rate_sample rs = { .prior_delivered = 0 };
4252 u32 prior_snd_una = tp->snd_una;
4253 bool is_sack_reneg = tp->is_sack_reneg;
4254 u32 ack_seq = TCP_SKB_CB(skb)->seq;
4255 u32 ack = TCP_SKB_CB(skb)->ack_seq;
4256 int num_dupack = 0;
4257 int prior_packets = tp->packets_out;
4258 u32 delivered = tp->delivered;
4259 u32 lost = tp->lost;
4260 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
4261 u32 ecn_count = 0; /* Did we receive ECE/an AccECN ACE update? */
4262 u32 prior_fack;
4263
4264 sack_state.first_sackt = 0;
4265 sack_state.rate = &rs;
4266 sack_state.sack_delivered = 0;
4267 sack_state.delivered_bytes = 0;
4268
4269 /* We very likely will need to access rtx queue. */
4270 prefetch(sk->tcp_rtx_queue.rb_node);
4271
4272 /* If the ack is older than previous acks
4273 * then we can probably ignore it.
4274 */
4275 if (before(ack, prior_snd_una)) {
4276 u32 max_window;
4277
4278 /* do not accept ACK for bytes we never sent. */
4279 max_window = min_t(u64, tp->max_window, tp->bytes_acked);
4280 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
4281 if (before(ack, prior_snd_una - max_window)) {
4282 if (!(flag & FLAG_NO_CHALLENGE_ACK))
4283 tcp_send_challenge_ack(sk, false);
4284 return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
4285 }
4286 goto old_ack;
4287 }
4288
4289 /* If the ack includes data we haven't sent yet, drop the
4290 * segment. RFC 793 Section 3.9 and RFC 5961 Section 5.2
4291 * require us to send an ACK back in that case.
4292 */
4293 if (after(ack, tp->snd_nxt)) {
4294 if (!(flag & FLAG_NO_CHALLENGE_ACK))
4295 tcp_send_challenge_ack(sk, false);
4296 return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA;
4297 }
4298
4299 if (after(ack, prior_snd_una)) {
4300 flag |= FLAG_SND_UNA_ADVANCED;
4301 WRITE_ONCE(icsk->icsk_retransmits, 0);
4302
4303 #if IS_ENABLED(CONFIG_TLS_DEVICE)
4304 if (static_branch_unlikely(&clean_acked_data_enabled.key))
4305 if (tp->tcp_clean_acked)
4306 tp->tcp_clean_acked(sk, ack);
4307 #endif
4308 }
4309
4310 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
4311 rs.prior_in_flight = tcp_packets_in_flight(tp);
4312
4313 /* ts_recent update must be made after we are sure that the packet
4314 * is in window.
4315 */
4316 if (flag & FLAG_UPDATE_TS_RECENT)
4317 flag |= tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4318
4319 if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
4320 FLAG_SND_UNA_ADVANCED) {
4321 /* Window is constant, pure forward advance.
4322 * No more checks are required.
4323 * Note, we use the fact that SND.UNA>=SND.WL2.
4324 */
4325 tcp_update_wl(tp, ack_seq);
4326 tcp_snd_una_update(tp, ack);
4327 flag |= FLAG_WIN_UPDATE;
4328
4329 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
4330 } else {
4331 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
4332 flag |= FLAG_DATA;
4333 else
4334 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
4335
4336 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
4337
4338 if (TCP_SKB_CB(skb)->sacked)
4339 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4340 &sack_state);
4341
4342 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb)))
4343 flag |= FLAG_ECE;
4344
4345 if (sack_state.sack_delivered)
4346 tcp_count_delivered(tp, sack_state.sack_delivered,
4347 flag & FLAG_ECE);
4348 }
4349
4350 /* This is a deviation from RFC3168 since it states that:
4351 * "When the TCP data sender is ready to set the CWR bit after reducing
4352 * the congestion window, it SHOULD set the CWR bit only on the first
4353 * new data packet that it transmits."
4354 * We accept CWR on pure ACKs to be more robust
4355 * with widely-deployed TCP implementations that do this.
4356 */
4357 tcp_ecn_accept_cwr(sk, skb);
4358
4359 /* We passed data and got it acked, remove any soft error
4360 * log. Something worked...
4361 */
4362 if (READ_ONCE(sk->sk_err_soft))
4363 WRITE_ONCE(sk->sk_err_soft, 0);
4364 WRITE_ONCE(icsk->icsk_probes_out, 0);
4365 tp->rcv_tstamp = tcp_jiffies32;
4366 if (!prior_packets)
4367 goto no_queue;
4368
4369 /* See if we can take anything off of the retransmit queue. */
4370 flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una,
4371 &sack_state, flag & FLAG_ECE);
4372
4373 tcp_rack_update_reo_wnd(sk, &rs);
4374
4375 if (tcp_ecn_mode_accecn(tp))
4376 ecn_count = tcp_accecn_process(sk, skb,
4377 tp->delivered - delivered,
4378 sack_state.delivered_bytes,
4379 &flag);
4380
4381 tcp_in_ack_event(sk, flag);
4382
4383 if (unlikely(tp->tlp_high_seq))
4384 tcp_process_tlp_ack(sk, ack, flag);
4385
4386 if (tcp_ack_is_dubious(sk, flag)) {
4387 if (!(flag & (FLAG_SND_UNA_ADVANCED |
4388 FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
4389 num_dupack = 1;
4390 /* Consider if pure acks were aggregated in tcp_add_backlog() */
4391 if (!(flag & FLAG_DATA))
4392 num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
4393 }
4394 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4395 &rexmit);
4396 }
4397
4398 /* If needed, reset the TLP/RTO timer when RACK doesn't set it. */
4399 if (flag & FLAG_SET_XMIT_TIMER)
4400 tcp_set_xmit_timer(sk);
4401
4402 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
4403 sk_dst_confirm(sk);
4404
4405 delivered = tcp_newly_delivered(sk, delivered, ecn_count, flag);
4406
4407 lost = tp->lost - lost; /* freshly marked lost */
4408 rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
4409 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
4410 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
4411 tcp_xmit_recovery(sk, rexmit);
4412 return 1;
4413
4414 no_queue:
4415 if (tcp_ecn_mode_accecn(tp))
4416 ecn_count = tcp_accecn_process(sk, skb,
4417 tp->delivered - delivered,
4418 sack_state.delivered_bytes,
4419 &flag);
4420 tcp_in_ack_event(sk, flag);
4421 /* If data was DSACKed, see if we can undo a cwnd reduction. */
4422 if (flag & FLAG_DSACKING_ACK) {
4423 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4424 &rexmit);
4425 tcp_newly_delivered(sk, delivered, ecn_count, flag);
4426 }
4427 /* If this ack opens up a zero window, clear backoff. It was
4428 * being used to time the probes, and is probably far higher than
4429 * it needs to be for normal retransmission.
4430 */
4431 tcp_ack_probe(sk);
4432
4433 if (unlikely(tp->tlp_high_seq))
4434 tcp_process_tlp_ack(sk, ack, flag);
4435 return 1;
4436
4437 old_ack:
4438 /* If data was SACKed, tag it and see if we should send more data.
4439 * If data was DSACKed, see if we can undo a cwnd reduction.
4440 */
4441 if (TCP_SKB_CB(skb)->sacked) {
4442 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4443 &sack_state);
4444 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4445 &rexmit);
4446 tcp_newly_delivered(sk, delivered, ecn_count, flag);
4447 tcp_xmit_recovery(sk, rexmit);
4448 }
4449
4450 return 0;
4451 }
4452
4453 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
4454 bool syn, struct tcp_fastopen_cookie *foc,
4455 bool exp_opt)
4456 {
4457 /* Valid only in SYN or SYN-ACK with an even length. */
4458 if (!foc || !syn || len < 0 || (len & 1))
4459 return;
4460
4461 if (len >= TCP_FASTOPEN_COOKIE_MIN &&
4462 len <= TCP_FASTOPEN_COOKIE_MAX)
4463 memcpy(foc->val, cookie, len);
4464 else if (len != 0)
4465 len = -1;
4466 foc->len = len;
4467 foc->exp = exp_opt;
4468 }
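
/* Wire formats accepted here: the standard Fast Open option (RFC 7413) is
 * kind 34 with an even-length cookie of 4..16 bytes, while the older
 * experimental form reuses kind 254 (TCPOPT_EXP) with the 16-bit magic
 * 0xF989 before the cookie. Both forms reach this helper via
 * tcp_parse_options() below.
 */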
4469
4470 static bool smc_parse_options(const struct tcphdr *th,
4471 struct tcp_options_received *opt_rx,
4472 const unsigned char *ptr,
4473 int opsize)
4474 {
4475 #if IS_ENABLED(CONFIG_SMC)
4476 if (static_branch_unlikely(&tcp_have_smc)) {
4477 if (th->syn && !(opsize & 1) &&
4478 opsize >= TCPOLEN_EXP_SMC_BASE &&
4479 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
4480 opt_rx->smc_ok = 1;
4481 return true;
4482 }
4483 }
4484 #endif
4485 return false;
4486 }
4487
4488 /* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
4489 * value on success.
4490 */
4491 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
4492 {
4493 const unsigned char *ptr = (const unsigned char *)(th + 1);
4494 int length = (th->doff * 4) - sizeof(struct tcphdr);
4495 u16 mss = 0;
4496
4497 while (length > 0) {
4498 int opcode = *ptr++;
4499 int opsize;
4500
4501 switch (opcode) {
4502 case TCPOPT_EOL:
4503 return mss;
4504 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4505 length--;
4506 continue;
4507 default:
4508 if (length < 2)
4509 return mss;
4510 opsize = *ptr++;
4511 if (opsize < 2) /* "silly options" */
4512 return mss;
4513 if (opsize > length)
4514 return mss; /* fail on partial options */
4515 if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) {
4516 u16 in_mss = get_unaligned_be16(ptr);
4517
4518 if (in_mss) {
4519 if (user_mss && user_mss < in_mss)
4520 in_mss = user_mss;
4521 mss = in_mss;
4522 }
4523 }
4524 ptr += opsize - 2;
4525 length -= opsize;
4526 }
4527 }
4528 return mss;
4529 }
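
/* Byte-level example of what the loop above walks: an MSS option is kind 2,
 * length 4, followed by the 16-bit MSS value, e.g. 02 04 05 b4 for an MSS
 * of 1460. EOL (kind 0) terminates parsing and NOP (kind 1) is a single
 * padding byte with no length field, which is why both are special-cased.
 */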
4530
4531 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
4532 * But, this can also be called on packets in the established flow when
4533 * the fast version below fails.
4534 */
4535 void tcp_parse_options(const struct net *net,
4536 const struct sk_buff *skb,
4537 struct tcp_options_received *opt_rx, int estab,
4538 struct tcp_fastopen_cookie *foc)
4539 {
4540 const unsigned char *ptr;
4541 const struct tcphdr *th = tcp_hdr(skb);
4542 int length = (th->doff * 4) - sizeof(struct tcphdr);
4543
4544 ptr = (const unsigned char *)(th + 1);
4545 opt_rx->saw_tstamp = 0;
4546 opt_rx->accecn = 0;
4547 opt_rx->saw_unknown = 0;
4548
4549 while (length > 0) {
4550 int opcode = *ptr++;
4551 int opsize;
4552
4553 switch (opcode) {
4554 case TCPOPT_EOL:
4555 return;
4556 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4557 length--;
4558 continue;
4559 default:
4560 if (length < 2)
4561 return;
4562 opsize = *ptr++;
4563 if (opsize < 2) /* "silly options" */
4564 return;
4565 if (opsize > length)
4566 return; /* don't parse partial options */
4567 switch (opcode) {
4568 case TCPOPT_MSS:
4569 if (opsize == TCPOLEN_MSS && th->syn && !estab) {
4570 u16 in_mss = get_unaligned_be16(ptr);
4571 if (in_mss) {
4572 if (opt_rx->user_mss &&
4573 opt_rx->user_mss < in_mss)
4574 in_mss = opt_rx->user_mss;
4575 opt_rx->mss_clamp = in_mss;
4576 }
4577 }
4578 break;
4579 case TCPOPT_WINDOW:
4580 if (opsize == TCPOLEN_WINDOW && th->syn &&
4581 !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
4582 __u8 snd_wscale = *(__u8 *)ptr;
4583 opt_rx->wscale_ok = 1;
4584 if (snd_wscale > TCP_MAX_WSCALE) {
4585 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
4586 __func__,
4587 snd_wscale,
4588 TCP_MAX_WSCALE);
4589 snd_wscale = TCP_MAX_WSCALE;
4590 }
4591 opt_rx->snd_wscale = snd_wscale;
4592 }
4593 break;
4594 case TCPOPT_TIMESTAMP:
4595 if ((opsize == TCPOLEN_TIMESTAMP) &&
4596 ((estab && opt_rx->tstamp_ok) ||
4597 (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
4598 opt_rx->saw_tstamp = 1;
4599 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
4600 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
4601 }
4602 break;
4603 case TCPOPT_SACK_PERM:
4604 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
4605 !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
4606 opt_rx->sack_ok = TCP_SACK_SEEN;
4607 tcp_sack_reset(opt_rx);
4608 }
4609 break;
4610
4611 case TCPOPT_SACK:
4612 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
4613 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
4614 opt_rx->sack_ok) {
4615 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
4616 }
4617 break;
4618 #ifdef CONFIG_TCP_MD5SIG
4619 case TCPOPT_MD5SIG:
4620 /* The MD5 Hash has already been
4621 * checked (see tcp_v{4,6}_rcv()).
4622 */
4623 break;
4624 #endif
4625 #ifdef CONFIG_TCP_AO
4626 case TCPOPT_AO:
4627 /* TCP AO has already been checked
4628 * (see tcp_inbound_ao_hash()).
4629 */
4630 break;
4631 #endif
4632 case TCPOPT_FASTOPEN:
4633 tcp_parse_fastopen_option(
4634 opsize - TCPOLEN_FASTOPEN_BASE,
4635 ptr, th->syn, foc, false);
4636 break;
4637
4638 case TCPOPT_ACCECN0:
4639 case TCPOPT_ACCECN1:
4640 /* Save offset of AccECN option in TCP header */
4641 opt_rx->accecn = (ptr - 2) - (__u8 *)th;
4642 break;
4643
4644 case TCPOPT_EXP:
4645 /* Fast Open option shares code 254 using a
4646 * 16-bit magic number.
4647 */
4648 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
4649 get_unaligned_be16(ptr) ==
4650 TCPOPT_FASTOPEN_MAGIC) {
4651 tcp_parse_fastopen_option(opsize -
4652 TCPOLEN_EXP_FASTOPEN_BASE,
4653 ptr + 2, th->syn, foc, true);
4654 break;
4655 }
4656
4657 if (smc_parse_options(th, opt_rx, ptr, opsize))
4658 break;
4659
4660 opt_rx->saw_unknown = 1;
4661 break;
4662
4663 default:
4664 opt_rx->saw_unknown = 1;
4665 }
4666 ptr += opsize-2;
4667 length -= opsize;
4668 }
4669 }
4670 }
4671 EXPORT_SYMBOL(tcp_parse_options);
4672
4673 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
4674 {
4675 const __be32 *ptr = (const __be32 *)(th + 1);
4676
4677 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
4678 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
4679 tp->rx_opt.saw_tstamp = 1;
4680 ++ptr;
4681 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4682 ++ptr;
4683 if (*ptr)
4684 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
4685 else
4686 tp->rx_opt.rcv_tsecr = 0;
4687 return true;
4688 }
4689 return false;
4690 }
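
/* The single 32-bit compare above matches the canonical padding senders use
 * for a timestamps-only option block: 01 01 08 0a (NOP, NOP, kind 8,
 * length 10), followed by the 4-byte TSval and 4-byte TSecr that the two
 * subsequent loads pick up.
 */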
4691
4692 /* Fast parse options. This hopes to only see timestamps.
4693 * If it is wrong it falls back on tcp_parse_options().
4694 */
4695 static bool tcp_fast_parse_options(const struct net *net,
4696 const struct sk_buff *skb,
4697 const struct tcphdr *th, struct tcp_sock *tp)
4698 {
4699 /* In the spirit of fast parsing, compare doff directly to constant
4700 * values. Because equality is used, short doff can be ignored here.
4701 */
4702 if (th->doff == (sizeof(*th) / 4)) {
4703 tp->rx_opt.saw_tstamp = 0;
4704 tp->rx_opt.accecn = 0;
4705 return false;
4706 } else if (tp->rx_opt.tstamp_ok &&
4707 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
4708 if (tcp_parse_aligned_timestamp(tp, th)) {
4709 tp->rx_opt.accecn = 0;
4710 return true;
4711 }
4712 }
4713
4714 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
4715 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
4716 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
4717
4718 return true;
4719 }
4720
4721 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
4722 *
4723 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
4724 * it can pass through stack. So, the following predicate verifies that
4725 * this segment is not used for anything but congestion avoidance or
4726 * fast retransmit. Moreover, we even are able to eliminate most of such
4727 * second order effects, if we apply some small "replay" window (~RTO)
4728 * to timestamp space.
4729 *
4730 * All these measures still do not guarantee that we reject wrapped ACKs
4731 * on networks with high bandwidth, when sequence space is recycled quickly,
4732 * but it guarantees that such events will be very rare and do not affect
4733 * connection seriously. This doesn't look nice, but alas, PAWS is really
4734 * buggy extension.
4735 *
4736 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
4737 * states that events when retransmit arrives after original data are rare.
4738 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
4739 * the biggest problem on large power networks even with minor reordering.
4740 * OK, let's give it a small replay window. If the peer clock is even 1 Hz, it is safe
4741 * up to a bandwidth of 18 Gigabit/sec. 8) ]
4742 */
4743
4744 /* Estimates max number of increments of remote peer TSval in
4745 * a replay window (based on our current RTO estimation).
4746 */
4747 static u32 tcp_tsval_replay(const struct sock *sk)
4748 {
4749 /* If we use usec TS resolution,
4750 * then expect the remote peer to use the same resolution.
4751 */
4752 if (tcp_sk(sk)->tcp_usec_ts)
4753 return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
4754
4755 /* RFC 7323 recommends a TSval clock between 1ms and 1sec.
4756 * We know that some OS (including old linux) can use 1200 Hz.
4757 */
4758 return inet_csk(sk)->icsk_rto * 1200 / HZ;
4759 }
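
/* Worked example (hypothetical values): with HZ=1000 and an RTO of 200 ms
 * (200 jiffies), the non-usec branch allows 200 * 1200 / 1000 = 240 TSval
 * increments inside the replay window, i.e. it assumes the peer's timestamp
 * clock ticks no faster than about 1.2 kHz.
 */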
4760
4761 static enum skb_drop_reason tcp_disordered_ack_check(const struct sock *sk,
4762 const struct sk_buff *skb)
4763 {
4764 const struct tcp_sock *tp = tcp_sk(sk);
4765 const struct tcphdr *th = tcp_hdr(skb);
4766 SKB_DR_INIT(reason, TCP_RFC7323_PAWS);
4767 u32 ack = TCP_SKB_CB(skb)->ack_seq;
4768 u32 seq = TCP_SKB_CB(skb)->seq;
4769
4770 /* 1. Is this not a pure ACK ? */
4771 if (!th->ack || seq != TCP_SKB_CB(skb)->end_seq)
4772 return reason;
4773
4774 /* 2. Is its sequence not the expected one ? */
4775 if (seq != tp->rcv_nxt)
4776 return before(seq, tp->rcv_nxt) ?
4777 SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK :
4778 reason;
4779
4780 /* 3. Is this not a duplicate ACK ? */
4781 if (ack != tp->snd_una)
4782 return reason;
4783
4784 /* 4. Is this updating the window ? */
4785 if (tcp_may_update_window(tp, ack, seq, ntohs(th->window) <<
4786 tp->rx_opt.snd_wscale))
4787 return reason;
4788
4789 /* 5. Is this not in the replay window ? */
4790 if ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) >
4791 tcp_tsval_replay(sk))
4792 return reason;
4793
4794 return 0;
4795 }
4796
4797 /* Check segment sequence number for validity.
4798 *
4799 * Segment controls are considered valid, if the segment
4800 * fits to the window after truncation to the window. Acceptability
4801 * of data (and SYN, FIN, of course) is checked separately.
4802 * See tcp_data_queue(), for example.
4803 *
4804 * Also, controls (RST is the main one) are accepted using RCV.WUP instead
4805 * of RCV.NXT. The peer may not have advanced its SND.UNA yet when we
4806 * delayed the ACK, so that its SND.UNA <= our RCV.WUP.
4807 * (borrowed from freebsd)
4808 */
4809
4810 static enum skb_drop_reason tcp_sequence(const struct sock *sk,
4811 u32 seq, u32 end_seq,
4812 const struct tcphdr *th)
4813 {
4814 const struct tcp_sock *tp = tcp_sk(sk);
4815
4816 if (before(end_seq, tp->rcv_wup))
4817 return SKB_DROP_REASON_TCP_OLD_SEQUENCE;
4818
4819 if (unlikely(after(end_seq, tp->rcv_nxt + tcp_max_receive_window(tp)))) {
4820 /* Some stacks are known to handle FIN incorrectly; allow the
4821 * FIN to extend beyond the window and check it in detail later.
4822 */
4823 if (!after(end_seq - th->fin, tp->rcv_nxt + tcp_receive_window(tp)))
4824 return SKB_NOT_DROPPED_YET;
4825
4826 if (after(seq, tp->rcv_nxt + tcp_max_receive_window(tp)))
4827 return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
4828
4829 /* Only accept this packet if receive queue is empty. */
4830 if (skb_queue_len(&sk->sk_receive_queue))
4831 return SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE;
4832 }
4833
4834 return SKB_NOT_DROPPED_YET;
4835 }
4836
4837
4838 void tcp_done_with_error(struct sock *sk, int err)
4839 {
4840 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4841 WRITE_ONCE(sk->sk_err, err);
4842 smp_wmb();
4843
4844 tcp_write_queue_purge(sk);
4845 tcp_done(sk);
4846
4847 if (!sock_flag(sk, SOCK_DEAD))
4848 sk_error_report(sk);
4849 }
4850
4851 /* When we get a reset we do this. */
4852 void tcp_reset(struct sock *sk, struct sk_buff *skb)
4853 {
4854 int err;
4855
4856 trace_tcp_receive_reset(sk);
4857
4858 /* mptcp can't tell us to ignore reset pkts,
4859 * so just ignore the return value of mptcp_incoming_options().
4860 */
4861 if (sk_is_mptcp(sk))
4862 mptcp_incoming_options(sk, skb);
4863
4864 /* We want the right error as BSD sees it (and indeed as we do). */
4865 switch (sk->sk_state) {
4866 case TCP_SYN_SENT:
4867 err = ECONNREFUSED;
4868 break;
4869 case TCP_CLOSE_WAIT:
4870 err = EPIPE;
4871 break;
4872 case TCP_CLOSE:
4873 return;
4874 default:
4875 err = ECONNRESET;
4876 }
4877 tcp_done_with_error(sk, err);
4878 }
4879
4880 /*
4881 * Process the FIN bit. This now behaves as it is supposed to work
4882 * and the FIN takes effect when it is validly part of sequence
4883 * space. Not before when we get holes.
4884 *
4885 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4886 * (and thence onto LAST-ACK and finally, CLOSE, we never enter
4887 * TIME-WAIT)
4888 *
4889 * If we are in FINWAIT-1, a received FIN indicates simultaneous
4890 * close and we go into CLOSING (and later onto TIME-WAIT)
4891 *
4892 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4893 */
4894 void tcp_fin(struct sock *sk)
4895 {
4896 struct tcp_sock *tp = tcp_sk(sk);
4897
4898 inet_csk_schedule_ack(sk);
4899
4900 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
4901 sock_set_flag(sk, SOCK_DONE);
4902
4903 switch (sk->sk_state) {
4904 case TCP_SYN_RECV:
4905 case TCP_ESTABLISHED:
4906 /* Move to CLOSE_WAIT */
4907 tcp_set_state(sk, TCP_CLOSE_WAIT);
4908 inet_csk_enter_pingpong_mode(sk);
4909 break;
4910
4911 case TCP_CLOSE_WAIT:
4912 case TCP_CLOSING:
4913 /* Received a retransmission of the FIN, do
4914 * nothing.
4915 */
4916 break;
4917 case TCP_LAST_ACK:
4918 /* RFC793: Remain in the LAST-ACK state. */
4919 break;
4920
4921 case TCP_FIN_WAIT1:
4922 /* This case occurs when a simultaneous close
4923 * happens, we must ack the received FIN and
4924 * enter the CLOSING state.
4925 */
4926 tcp_send_ack(sk);
4927 tcp_set_state(sk, TCP_CLOSING);
4928 break;
4929 case TCP_FIN_WAIT2:
4930 /* Received a FIN -- send ACK and enter TIME_WAIT. */
4931 tcp_send_ack(sk);
4932 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4933 break;
4934 default:
4935 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4936 * cases we should never reach this piece of code.
4937 */
4938 pr_err("%s: Impossible, sk->sk_state=%d\n",
4939 __func__, sk->sk_state);
4940 break;
4941 }
4942
4943 /* It _is_ possible, that we have something out-of-order _after_ FIN.
4944 * Probably, we should reset in this case. For now drop them.
4945 */
4946 skb_rbtree_purge(&tp->out_of_order_queue);
4947 if (tcp_is_sack(tp))
4948 tcp_sack_reset(&tp->rx_opt);
4949
4950 if (!sock_flag(sk, SOCK_DEAD)) {
4951 sk->sk_state_change(sk);
4952
4953 /* Do not send POLL_HUP for half duplex close. */
4954 if (sk->sk_shutdown == SHUTDOWN_MASK ||
4955 sk->sk_state == TCP_CLOSE)
4956 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4957 else
4958 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4959 }
4960 }
4961
4962 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4963 u32 end_seq)
4964 {
4965 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4966 if (before(seq, sp->start_seq))
4967 sp->start_seq = seq;
4968 if (after(end_seq, sp->end_seq))
4969 sp->end_seq = end_seq;
4970 return true;
4971 }
4972 return false;
4973 }
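/* Usage sketch with hypothetical sequence numbers, for illustration:
 *
 *	struct tcp_sack_block sp = { .start_seq = 100, .end_seq = 200 };
 *
 *	tcp_sack_extend(&sp, 150, 300);	// overlap: sp becomes [100, 300)
 *	tcp_sack_extend(&sp, 400, 500);	// disjoint: returns false, sp unchanged
 */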
4974
4975 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4976 {
4977 struct tcp_sock *tp = tcp_sk(sk);
4978
4979 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4980 int mib_idx;
4981
4982 if (before(seq, tp->rcv_nxt))
4983 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4984 else
4985 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4986
4987 NET_INC_STATS(sock_net(sk), mib_idx);
4988
4989 tp->rx_opt.dsack = 1;
4990 tp->duplicate_sack[0].start_seq = seq;
4991 tp->duplicate_sack[0].end_seq = end_seq;
4992 }
4993 }
4994
4995 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4996 {
4997 struct tcp_sock *tp = tcp_sk(sk);
4998
4999 if (!tp->rx_opt.dsack)
5000 tcp_dsack_set(sk, seq, end_seq);
5001 else
5002 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
5003 }
5004
5005 static void tcp_rcv_spurious_retrans(struct sock *sk,
5006 const struct sk_buff *skb)
5007 {
5008 struct tcp_sock *tp = tcp_sk(sk);
5009
5010 /* When the ACK path fails or drops most ACKs, the sender would
5011 * timeout and spuriously retransmit the same segment repeatedly.
5012 * If it seems our ACKs are not reaching the other side,
5013 * based on receiving a duplicate data segment with new flowlabel
5014 * (suggesting the sender suffered an RTO), and we are not already
5015 * repathing due to our own RTO, then rehash the socket to repath our
5016 * packets.
5017 */
5018 #if IS_ENABLED(CONFIG_IPV6)
5019 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss &&
5020 skb->protocol == htons(ETH_P_IPV6) &&
5021 (tcp_sk(sk)->inet_conn.icsk_ack.lrcv_flowlabel !=
5022 ntohl(ip6_flowlabel(ipv6_hdr(skb)))) &&
5023 sk_rethink_txhash(sk))
5024 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
5025
5026 /* Save last flowlabel after a spurious retrans. */
5027 tcp_save_lrcv_flowlabel(sk, skb);
5028 #endif
5029 /* Check DSACK info to detect that the previous ACK carrying the
5030 * AccECN option was lost after the second retransmission, and then
5031 * stop sending the AccECN option in all subsequent ACKs.
5032 */
5033 if (tcp_ecn_mode_accecn(tp) &&
5034 tp->accecn_opt_sent_w_dsack &&
5035 TCP_SKB_CB(skb)->seq == tp->duplicate_sack[0].start_seq)
5036 tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_SEND);
5037 }
5038
5039 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
5040 {
5041 struct tcp_sock *tp = tcp_sk(sk);
5042
5043 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5044 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5045 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5046 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5047
5048 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
5049 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
5050
5051 tcp_rcv_spurious_retrans(sk, skb);
5052 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
5053 end_seq = tp->rcv_nxt;
5054 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
5055 }
5056 }
5057
5058 tcp_send_ack(sk);
5059 }
5060
5061 /* These routines update the SACK block as out-of-order packets arrive or
5062 * in-order packets close up the sequence space.
5063 */
5064 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
5065 {
5066 int this_sack;
5067 struct tcp_sack_block *sp = &tp->selective_acks[0];
5068 struct tcp_sack_block *swalk = sp + 1;
5069
5070 /* See if the recent change to the first SACK eats into
5071 * or hits the sequence space of other SACK blocks, if so coalesce.
5072 */
5073 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
5074 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
5075 int i;
5076
5077 /* Zap SWALK, by moving every further SACK up by one slot.
5078 * Decrease num_sacks.
5079 */
5080 tp->rx_opt.num_sacks--;
5081 for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
5082 sp[i] = sp[i + 1];
5083 continue;
5084 }
5085 this_sack++;
5086 swalk++;
5087 }
5088 }
5089
5090 void tcp_sack_compress_send_ack(struct sock *sk)
5091 {
5092 struct tcp_sock *tp = tcp_sk(sk);
5093
5094 if (!tp->compressed_ack)
5095 return;
5096
5097 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
5098 __sock_put(sk);
5099
5100 /* Since we have to send one ack finally,
5101 * subtract one from tp->compressed_ack to keep
5102 * LINUX_MIB_TCPACKCOMPRESSED accurate.
5103 */
5104 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
5105 tp->compressed_ack - 1);
5106
5107 tp->compressed_ack = 0;
5108 tcp_send_ack(sk);
5109 }
5110
5111 /* Reasonable amount of sack blocks included in TCP SACK option
5112 * The max is 4, but this becomes 3 if TCP timestamps are there.
5113 * Given that SACK packets might be lost, be conservative and use 2.
5114 */
5115 #define TCP_SACK_BLOCKS_EXPECTED 2
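/* Option-space arithmetic behind the comment above: a SACK option takes
 * 2 + 8 * n bytes for n blocks. Of the 40 bytes of TCP option space the
 * aligned timestamp option consumes 12, leaving 28 bytes, enough for
 * 3 blocks (2 + 8 * 3 == 26); without timestamps 4 blocks fit
 * (2 + 8 * 4 == 34 <= 40).
 */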
5116
5117 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
5118 {
5119 struct tcp_sock *tp = tcp_sk(sk);
5120 struct tcp_sack_block *sp = &tp->selective_acks[0];
5121 int cur_sacks = tp->rx_opt.num_sacks;
5122 int this_sack;
5123
5124 if (!cur_sacks)
5125 goto new_sack;
5126
5127 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
5128 if (tcp_sack_extend(sp, seq, end_seq)) {
5129 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
5130 tcp_sack_compress_send_ack(sk);
5131 /* Rotate this_sack to the first one. */
5132 for (; this_sack > 0; this_sack--, sp--)
5133 swap(*sp, *(sp - 1));
5134 if (cur_sacks > 1)
5135 tcp_sack_maybe_coalesce(tp);
5136 return;
5137 }
5138 }
5139
5140 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
5141 tcp_sack_compress_send_ack(sk);
5142
5143 /* Could not find an adjacent existing SACK, build a new one,
5144 * put it at the front, and shift everyone else down. We
5145 * always know there is at least one SACK present already here.
5146 *
5147 * If the sack array is full, forget about the last one.
5148 */
5149 if (this_sack >= TCP_NUM_SACKS) {
5150 this_sack--;
5151 tp->rx_opt.num_sacks--;
5152 sp--;
5153 }
5154 for (; this_sack > 0; this_sack--, sp--)
5155 *sp = *(sp - 1);
5156
5157 new_sack:
5158 /* Build the new head SACK, and we're done. */
5159 sp->start_seq = seq;
5160 sp->end_seq = end_seq;
5161 tp->rx_opt.num_sacks++;
5162 }
5163
5164 /* RCV.NXT advances, some SACKs should be eaten. */
5165
5166 static void tcp_sack_remove(struct tcp_sock *tp)
5167 {
5168 struct tcp_sack_block *sp = &tp->selective_acks[0];
5169 int num_sacks = tp->rx_opt.num_sacks;
5170 int this_sack;
5171
5172 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
5173 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5174 tp->rx_opt.num_sacks = 0;
5175 return;
5176 }
5177
5178 for (this_sack = 0; this_sack < num_sacks;) {
5179 /* Check if the start of the sack is covered by RCV.NXT. */
5180 if (!before(tp->rcv_nxt, sp->start_seq)) {
5181 int i;
5182
5183 /* RCV.NXT must cover all the block! */
5184 WARN_ON(before(tp->rcv_nxt, sp->end_seq));
5185
5186 /* Zap this SACK, by moving forward any other SACKS. */
5187 for (i = this_sack+1; i < num_sacks; i++)
5188 tp->selective_acks[i-1] = tp->selective_acks[i];
5189 num_sacks--;
5190 continue;
5191 }
5192 this_sack++;
5193 sp++;
5194 }
5195 tp->rx_opt.num_sacks = num_sacks;
5196 }
5197
5198 /**
5199 * tcp_try_coalesce - try to merge skb to prior one
5200 * @sk: socket
5201 * @to: prior buffer
5202 * @from: buffer to add in queue
5203 * @fragstolen: pointer to boolean
5204 *
5205 * Before queueing skb @from after @to, try to merge them
5206 * to reduce overall memory use and queue lengths, if cost is small.
5207 * Packets in ofo or receive queues can stay a long time.
5208 * Better try to coalesce them right now to avoid future collapses.
5209 * Returns true if caller should free @from instead of queueing it
5210 */
5211 static bool tcp_try_coalesce(struct sock *sk,
5212 struct sk_buff *to,
5213 struct sk_buff *from,
5214 bool *fragstolen)
5215 {
5216 int delta;
5217
5218 *fragstolen = false;
5219
5220 /* It's possible this segment overlaps with the prior segment in the queue */
5221 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
5222 return false;
5223
5224 if (!tcp_skb_can_collapse_rx(to, from))
5225 return false;
5226
5227 if (!skb_try_coalesce(to, from, fragstolen, &delta))
5228 return false;
5229
5230 atomic_add(delta, &sk->sk_rmem_alloc);
5231 sk_mem_charge(sk, delta);
5232 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
5233 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
5234 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
5235 TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
5236
5237 if (TCP_SKB_CB(from)->has_rxtstamp) {
5238 TCP_SKB_CB(to)->has_rxtstamp = true;
5239 to->tstamp = from->tstamp;
5240 skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
5241 }
5242
5243 return true;
5244 }
5245
5246 static bool tcp_ooo_try_coalesce(struct sock *sk,
5247 struct sk_buff *to,
5248 struct sk_buff *from,
5249 bool *fragstolen)
5250 {
5251 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
5252
5253 /* In case tcp_drop_reason() is called later, update to->gso_segs */
5254 if (res) {
5255 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
5256 max_t(u16, 1, skb_shinfo(from)->gso_segs);
5257
5258 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
5259 }
5260 return res;
5261 }
5262
5263 noinline_for_tracing static void
5264 tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
5265 {
5266 sk_drops_skbadd(sk, skb);
5267 sk_skb_reason_drop(sk, skb, reason);
5268 }
5269
5270 /* This one checks to see if we can put data from the
5271 * out_of_order queue into the receive_queue.
5272 */
5273 static void tcp_ofo_queue(struct sock *sk)
5274 {
5275 struct tcp_sock *tp = tcp_sk(sk);
5276 __u32 dsack_high = tp->rcv_nxt;
5277 bool fin, fragstolen, eaten;
5278 struct sk_buff *skb, *tail;
5279 struct rb_node *p;
5280
5281 p = rb_first(&tp->out_of_order_queue);
5282 while (p) {
5283 skb = rb_to_skb(p);
5284 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5285 break;
5286
5287 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
5288 __u32 dsack = dsack_high;
5289
5290 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
5291 dsack = TCP_SKB_CB(skb)->end_seq;
5292 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
5293 }
5294 p = rb_next(p);
5295 rb_erase(&skb->rbnode, &tp->out_of_order_queue);
5296
5297 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
5298 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP);
5299 continue;
5300 }
5301
5302 tail = skb_peek_tail(&sk->sk_receive_queue);
5303 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
5304 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
5305 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
5306 if (!eaten)
5307 tcp_add_receive_queue(sk, skb);
5308 else
5309 kfree_skb_partial(skb, fragstolen);
5310
5311 if (unlikely(fin)) {
5312 tcp_fin(sk);
5313 /* tcp_fin() purges tp->out_of_order_queue,
5314 * so we must end this loop right now.
5315 */
5316 break;
5317 }
5318 }
5319 }
5320
5321 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
5322 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
5323
5324 static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
5325 {
5326 unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
5327
5328 return rmem <= sk->sk_rcvbuf;
5329 }
5330
5331 static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
5332 unsigned int size)
5333 {
5334 if (!tcp_can_ingest(sk, skb) ||
5335 !sk_rmem_schedule(sk, skb, size)) {
5336
5337 if (tcp_prune_queue(sk, skb) < 0)
5338 return -1;
5339
5340 while (!sk_rmem_schedule(sk, skb, size)) {
5341 if (!tcp_prune_ofo_queue(sk, skb))
5342 return -1;
5343 }
5344 }
5345 return 0;
5346 }
5347
5348 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5349 {
5350 struct tcp_sock *tp = tcp_sk(sk);
5351 struct rb_node **p, *parent;
5352 struct sk_buff *skb1;
5353 u32 seq, end_seq;
5354 bool fragstolen;
5355
5356 tcp_save_lrcv_flowlabel(sk, skb);
5357 tcp_data_ecn_check(sk, skb);
5358
5359 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
5360 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
5361 READ_ONCE(sk->sk_data_ready)(sk);
5362 tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
5363 return;
5364 }
5365
5366 tcp_measure_rcv_mss(sk, skb);
5367 /* Disable header prediction. */
5368 tp->pred_flags = 0;
5369 inet_csk_schedule_ack(sk);
5370
5371 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
5372 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
5373 seq = TCP_SKB_CB(skb)->seq;
5374 end_seq = TCP_SKB_CB(skb)->end_seq;
5375
5376 p = &tp->out_of_order_queue.rb_node;
5377 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5378 /* Initial out of order segment, build 1 SACK. */
5379 if (tcp_is_sack(tp)) {
5380 tp->rx_opt.num_sacks = 1;
5381 tp->selective_acks[0].start_seq = seq;
5382 tp->selective_acks[0].end_seq = end_seq;
5383 }
5384 rb_link_node(&skb->rbnode, NULL, p);
5385 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5386 tp->ooo_last_skb = skb;
5387 goto end;
5388 }
5389
5390 /* In the typical case, we are adding an skb to the end of the list.
5391 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
5392 */
5393 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
5394 skb, &fragstolen)) {
5395 coalesce_done:
5396 /* For non sack flows, do not grow window to force DUPACK
5397 * and trigger fast retransmit.
5398 */
5399 if (tcp_is_sack(tp))
5400 tcp_grow_window(sk, skb, true);
5401 kfree_skb_partial(skb, fragstolen);
5402 skb = NULL;
5403 goto add_sack;
5404 }
5405 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
5406 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
5407 parent = &tp->ooo_last_skb->rbnode;
5408 p = &parent->rb_right;
5409 goto insert;
5410 }
5411
5412 /* Find place to insert this segment. Handle overlaps on the way. */
5413 parent = NULL;
5414 while (*p) {
5415 parent = *p;
5416 skb1 = rb_to_skb(parent);
5417 if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5418 p = &parent->rb_left;
5419 continue;
5420 }
5421 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
5422 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5423 /* All the bits are present. Drop. */
5424 NET_INC_STATS(sock_net(sk),
5425 LINUX_MIB_TCPOFOMERGE);
5426 tcp_drop_reason(sk, skb,
5427 SKB_DROP_REASON_TCP_OFOMERGE);
5428 skb = NULL;
5429 tcp_dsack_set(sk, seq, end_seq);
5430 goto add_sack;
5431 }
5432 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
5433 /* Partial overlap. */
5434 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
5435 } else {
5436 /* skb's seq == skb1's seq and skb covers skb1.
5437 * Replace skb1 with skb.
5438 */
5439 rb_replace_node(&skb1->rbnode, &skb->rbnode,
5440 &tp->out_of_order_queue);
5441 tcp_dsack_extend(sk,
5442 TCP_SKB_CB(skb1)->seq,
5443 TCP_SKB_CB(skb1)->end_seq);
5444 NET_INC_STATS(sock_net(sk),
5445 LINUX_MIB_TCPOFOMERGE);
5446 tcp_drop_reason(sk, skb1,
5447 SKB_DROP_REASON_TCP_OFOMERGE);
5448 goto merge_right;
5449 }
5450 } else if (tcp_ooo_try_coalesce(sk, skb1,
5451 skb, &fragstolen)) {
5452 goto coalesce_done;
5453 }
5454 p = &parent->rb_right;
5455 }
5456 insert:
5457 /* Insert segment into RB tree. */
5458 rb_link_node(&skb->rbnode, parent, p);
5459 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5460
5461 merge_right:
5462 /* Remove other segments covered by skb. */
5463 while ((skb1 = skb_rb_next(skb)) != NULL) {
5464 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5465 break;
5466 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5467 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5468 end_seq);
5469 break;
5470 }
5471 rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
5472 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5473 TCP_SKB_CB(skb1)->end_seq);
5474 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
5475 tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE);
5476 }
5477 /* If there is no skb after us, we are the last_skb ! */
5478 if (!skb1)
5479 tp->ooo_last_skb = skb;
5480
5481 add_sack:
5482 if (tcp_is_sack(tp))
5483 tcp_sack_new_ofo_skb(sk, seq, end_seq);
5484 end:
5485 if (skb) {
5486 /* For non sack flows, do not grow window to force DUPACK
5487 * and trigger fast retransmit.
5488 */
5489 if (tcp_is_sack(tp))
5490 tcp_grow_window(sk, skb, false);
5491 skb_condense(skb);
5492 skb_set_owner_r(skb, sk);
5493 }
5494 /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
5495 if (sk->sk_socket)
5496 tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
5497 }
5498
5499 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
5500 bool *fragstolen)
5501 {
5502 int eaten;
5503 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
5504
5505 eaten = (tail &&
5506 tcp_try_coalesce(sk, tail,
5507 skb, fragstolen)) ? 1 : 0;
5508 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
5509 if (!eaten) {
5510 tcp_add_receive_queue(sk, skb);
5511 skb_set_owner_r(skb, sk);
5512 }
5513 return eaten;
5514 }
5515
5516 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
5517 {
5518 struct sk_buff *skb;
5519 int err = -ENOMEM;
5520 int data_len = 0;
5521 bool fragstolen;
5522
5523 if (size == 0)
5524 return 0;
5525
5526 if (size > PAGE_SIZE) {
5527 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
5528
5529 data_len = npages << PAGE_SHIFT;
5530 size = data_len + (size & ~PAGE_MASK);
5531 }
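	/* Example of the split above, assuming 4 KiB pages and the usual
	 * MAX_SKB_FRAGS of 17: for size == 10000, npages == 2,
	 * data_len == 8192 and size stays 10000, so 1808 bytes go to the
	 * linear area and 8192 bytes to page frags. Only when npages is
	 * clamped by MAX_SKB_FRAGS does the recomputed size actually
	 * shrink the request.
	 */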
5532 skb = alloc_skb_with_frags(size - data_len, data_len,
5533 PAGE_ALLOC_COSTLY_ORDER,
5534 &err, sk->sk_allocation);
5535 if (!skb)
5536 goto err;
5537
5538 skb_put(skb, size - data_len);
5539 skb->data_len = data_len;
5540 skb->len = size;
5541
5542 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5543 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5544 goto err_free;
5545 }
5546
5547 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
5548 if (err)
5549 goto err_free;
5550
5551 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
5552 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
5553 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
5554
5555 if (tcp_queue_rcv(sk, skb, &fragstolen)) {
5556 WARN_ON_ONCE(fragstolen); /* should not happen */
5557 __kfree_skb(skb);
5558 }
5559 return size;
5560
5561 err_free:
5562 kfree_skb(skb);
5563 err:
5564 return err;
5565
5566 }
5567
5568 void tcp_data_ready(struct sock *sk)
5569 {
5570 if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
5571 READ_ONCE(sk->sk_data_ready)(sk);
5572 }
5573
5574 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
5575 {
5576 struct tcp_sock *tp = tcp_sk(sk);
5577 enum skb_drop_reason reason;
5578 bool fragstolen;
5579 int eaten;
5580
5581 /* If a subflow has been reset, the packet should not continue
5582 * to be processed, drop the packet.
5583 */
5584 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
5585 __kfree_skb(skb);
5586 return;
5587 }
5588
5589 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
5590 __kfree_skb(skb);
5591 return;
5592 }
5593 tcp_cleanup_skb(skb);
5594 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
5595
5596 reason = SKB_DROP_REASON_NOT_SPECIFIED;
5597 tp->rx_opt.dsack = 0;
5598
5599 /* Queue data for delivery to the user.
5600 * Packets in sequence go to the receive queue.
5601 * Out of sequence packets to the out_of_order_queue.
5602 */
5603 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
5604 if (tcp_receive_window(tp) == 0) {
5605 /* Some stacks are known to send bare FIN packets
5606 * in a loop even if we send RWIN 0 in our ACK.
5607 * Accepting this FIN does not hurt memory pressure
5608 * because the FIN flag will simply be merged to the
5609 * receive queue tail skb in most cases.
5610 */
5611 if (!skb->len &&
5612 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
5613 goto queue_and_out;
5614
5615 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5616 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5617 goto out_of_window;
5618 }
5619
5620 /* Ok. In sequence. In window. */
5621 queue_and_out:
5622 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5623 /* TODO: maybe ratelimit these WIN 0 ACK ? */
5624 inet_csk(sk)->icsk_ack.pending |=
5625 (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
5626 inet_csk_schedule_ack(sk);
5627 READ_ONCE(sk->sk_data_ready)(sk);
5628
5629 if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
5630 reason = SKB_DROP_REASON_PROTO_MEM;
5631 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5632 goto drop;
5633 }
5634 sk_forced_mem_schedule(sk, skb->truesize);
5635 }
5636
5637 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
5638 if (skb->len)
5639 tcp_event_data_recv(sk, skb);
5640 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
5641 tcp_fin(sk);
5642
5643 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5644 tcp_ofo_queue(sk);
5645
5646 /* RFC5681. 4.2. SHOULD send immediate ACK, when
5647 * gap in queue is filled.
5648 */
5649 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5650 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
5651 }
5652
5653 if (tp->rx_opt.num_sacks)
5654 tcp_sack_remove(tp);
5655
5656 tcp_fast_path_check(sk);
5657
5658 if (eaten > 0)
5659 kfree_skb_partial(skb, fragstolen);
5660 if (!sock_flag(sk, SOCK_DEAD))
5661 tcp_data_ready(sk);
5662 return;
5663 }
5664
5665 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
5666 tcp_rcv_spurious_retrans(sk, skb);
5667 /* A retransmit, 2nd most common case. Force an immediate ack. */
5668 reason = SKB_DROP_REASON_TCP_OLD_DATA;
5669 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5670 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5671
5672 out_of_window:
5673 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5674 inet_csk_schedule_ack(sk);
5675 drop:
5676 tcp_drop_reason(sk, skb, reason);
5677 return;
5678 }
5679
5680 /* Out of window. E.g. a zero window probe. */
5681 if (!before(TCP_SKB_CB(skb)->seq,
5682 tp->rcv_nxt + tcp_receive_window(tp))) {
5683 reason = SKB_DROP_REASON_TCP_OVERWINDOW;
5684 NET_INC_STATS(sock_net(sk), LINUX_MIB_BEYOND_WINDOW);
5685 goto out_of_window;
5686 }
5687
5688 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5689 /* Partial packet, seq < rcv_nxt < end_seq */
5690 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
5691
5692 /* If window is closed, drop tail of packet. But after
5693 * remembering D-SACK for its head made in previous line.
5694 */
5695 if (!tcp_receive_window(tp)) {
5696 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5697 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5698 goto out_of_window;
5699 }
5700 goto queue_and_out;
5701 }
5702
5703 tcp_data_queue_ofo(sk, skb);
5704 }
5705
5706 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
5707 {
5708 if (list)
5709 return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5710
5711 return skb_rb_next(skb);
5712 }
5713
5714 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5715 struct sk_buff_head *list,
5716 struct rb_root *root)
5717 {
5718 struct sk_buff *next = tcp_skb_next(skb, list);
5719
5720 if (list)
5721 __skb_unlink(skb, list);
5722 else
5723 rb_erase(&skb->rbnode, root);
5724
5725 __kfree_skb(skb);
5726 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
5727
5728 return next;
5729 }
5730
5731 /* Collapse contiguous sequence of skbs head..tail with
5732 * sequence numbers start..end.
5733 *
5734 * If tail is NULL, this means until the end of the queue.
5735 *
5736 * Segments with FIN/SYN are not collapsed (only because this
5737 * simplifies code)
5738 */
5739 static void
5740 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
5741 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
5742 {
5743 struct sk_buff *skb = head, *n;
5744 struct sk_buff_head tmp;
5745 bool end_of_skbs;
5746
5747 /* First, check that queue is collapsible and find
5748 * the point where collapsing can be useful.
5749 */
5750 restart:
5751 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
5752 n = tcp_skb_next(skb, list);
5753
5754 if (!skb_frags_readable(skb))
5755 goto skip_this;
5756
5757 /* No new bits? It is possible on ofo queue. */
5758 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5759 skb = tcp_collapse_one(sk, skb, list, root);
5760 if (!skb)
5761 break;
5762 goto restart;
5763 }
5764
5765 /* The first skb to collapse is:
5766 * - not SYN/FIN and
5767 * - bloated or contains data before "start" or
5768 * overlaps to the next one and mptcp allow collapsing.
5769 */
5770 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
5771 (tcp_win_from_space(sk, skb->truesize) > skb->len ||
5772 before(TCP_SKB_CB(skb)->seq, start))) {
5773 end_of_skbs = false;
5774 break;
5775 }
5776
5777 if (n && n != tail && skb_frags_readable(n) &&
5778 tcp_skb_can_collapse_rx(skb, n) &&
5779 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
5780 end_of_skbs = false;
5781 break;
5782 }
5783
5784 skip_this:
5785 /* Decided to skip this, advance start seq. */
5786 start = TCP_SKB_CB(skb)->end_seq;
5787 }
5788 if (end_of_skbs ||
5789 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5790 !skb_frags_readable(skb))
5791 return;
5792
5793 __skb_queue_head_init(&tmp);
5794
5795 while (before(start, end)) {
5796 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
5797 struct sk_buff *nskb;
5798
5799 nskb = alloc_skb(copy, GFP_ATOMIC);
5800 if (!nskb)
5801 break;
5802
5803 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
5804 skb_copy_decrypted(nskb, skb);
5805 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
5806 if (list)
5807 __skb_queue_before(list, skb, nskb);
5808 else
5809 __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
5810 skb_set_owner_r(nskb, sk);
5811 mptcp_skb_ext_move(nskb, skb);
5812
5813 /* Copy data, releasing collapsed skbs. */
5814 while (copy > 0) {
5815 int offset = start - TCP_SKB_CB(skb)->seq;
5816 int size = TCP_SKB_CB(skb)->end_seq - start;
5817
5818 BUG_ON(offset < 0);
5819 if (size > 0) {
5820 size = min(copy, size);
5821 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
5822 BUG();
5823 TCP_SKB_CB(nskb)->end_seq += size;
5824 copy -= size;
5825 start += size;
5826 }
5827 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5828 skb = tcp_collapse_one(sk, skb, list, root);
5829 if (!skb ||
5830 skb == tail ||
5831 !tcp_skb_can_collapse_rx(nskb, skb) ||
5832 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5833 !skb_frags_readable(skb))
5834 goto end;
5835 }
5836 }
5837 }
5838 end:
5839 skb_queue_walk_safe(&tmp, skb, n)
5840 tcp_rbtree_insert(root, skb);
5841 }
5842
5843 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
5844 * and tcp_collapse() them until all the queue is collapsed.
5845 */
5846 static void tcp_collapse_ofo_queue(struct sock *sk)
5847 {
5848 struct tcp_sock *tp = tcp_sk(sk);
5849 u32 range_truesize, sum_tiny = 0;
5850 struct sk_buff *skb, *head;
5851 u32 start, end;
5852
5853 skb = skb_rb_first(&tp->out_of_order_queue);
5854 new_range:
5855 if (!skb) {
5856 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5857 return;
5858 }
5859 start = TCP_SKB_CB(skb)->seq;
5860 end = TCP_SKB_CB(skb)->end_seq;
5861 range_truesize = skb->truesize;
5862
5863 for (head = skb;;) {
5864 skb = skb_rb_next(skb);
5865
5866 /* Range is terminated when we see a gap or when
5867 * we are at the queue end.
5868 */
5869 if (!skb ||
5870 after(TCP_SKB_CB(skb)->seq, end) ||
5871 before(TCP_SKB_CB(skb)->end_seq, start)) {
5872 /* Do not attempt collapsing tiny skbs */
5873 if (range_truesize != head->truesize ||
5874 end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
5875 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
5876 head, skb, start, end);
5877 } else {
5878 sum_tiny += range_truesize;
5879 if (sum_tiny > sk->sk_rcvbuf >> 3)
5880 return;
5881 }
5882 goto new_range;
5883 }
5884
5885 range_truesize += skb->truesize;
5886 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
5887 start = TCP_SKB_CB(skb)->seq;
5888 if (after(TCP_SKB_CB(skb)->end_seq, end))
5889 end = TCP_SKB_CB(skb)->end_seq;
5890 }
5891 }
5892
5893 /*
5894 * Clean the out-of-order queue to make room.
5895 * We drop high-sequence packets to:
5896 * 1) Leave a chance for holes to be filled.
5897 * This means we do not drop packets from the ooo queue if their sequence
5898 * is before the incoming packet's sequence.
5899 * 2) Not add too large latencies if thousands of packets sit there.
5900 * (But if the application shrinks SO_RCVBUF, we could still end up
5901 * freeing the whole queue here.)
5902 * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
5903 *
5904 * Return true if queue has shrunk.
5905 */
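/* For scale: the pruning goal below is sk_rcvbuf >> 3, so with a 1 MiB
 * receive buffer each pass releases at least roughly 128 KiB of skb
 * truesize from the tail of the ofo queue before the memory status is
 * re-checked.
 */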
5906 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
5907 {
5908 struct tcp_sock *tp = tcp_sk(sk);
5909 struct rb_node *node, *prev;
5910 bool pruned = false;
5911 int goal;
5912
5913 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5914 return false;
5915
5916 goal = sk->sk_rcvbuf >> 3;
5917 node = &tp->ooo_last_skb->rbnode;
5918
5919 do {
5920 struct sk_buff *skb = rb_to_skb(node);
5921
5922 /* If incoming skb would land last in ofo queue, stop pruning. */
5923 if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq))
5924 break;
5925 pruned = true;
5926 prev = rb_prev(node);
5927 rb_erase(node, &tp->out_of_order_queue);
5928 goal -= skb->truesize;
5929 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
5930 tp->ooo_last_skb = rb_to_skb(prev);
5931 if (!prev || goal <= 0) {
5932 if (tcp_can_ingest(sk, in_skb) &&
5933 !tcp_under_memory_pressure(sk))
5934 break;
5935 goal = sk->sk_rcvbuf >> 3;
5936 }
5937 node = prev;
5938 } while (node);
5939
5940 if (pruned) {
5941 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
5942 /* Reset SACK state. A conforming SACK implementation will
5943 * do the same at a timeout based retransmit. When a connection
5944 * is in a sad state like this, we care only about integrity
5945 * of the connection not performance.
5946 */
5947 if (tp->rx_opt.sack_ok)
5948 tcp_sack_reset(&tp->rx_opt);
5949 }
5950 return pruned;
5951 }
5952
5953 /* Reduce allocated memory if we can, trying to get
5954 * the socket within its memory limits again.
5955 *
5956 * Return less than zero if we should start dropping frames
5957 * until the socket owning process reads some of the data
5958 * to stabilize the situation.
5959 */
5960 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
5961 {
5962 struct tcp_sock *tp = tcp_sk(sk);
5963
5964 /* Do nothing if our queues are empty. */
5965 if (!atomic_read(&sk->sk_rmem_alloc))
5966 return -1;
5967
5968 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
5969
5970 if (!tcp_can_ingest(sk, in_skb))
5971 tcp_clamp_window(sk);
5972 else if (tcp_under_memory_pressure(sk))
5973 tcp_adjust_rcv_ssthresh(sk);
5974
5975 if (tcp_can_ingest(sk, in_skb))
5976 return 0;
5977
5978 tcp_collapse_ofo_queue(sk);
5979 if (!skb_queue_empty(&sk->sk_receive_queue))
5980 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
5981 skb_peek(&sk->sk_receive_queue),
5982 NULL,
5983 tp->copied_seq, tp->rcv_nxt);
5984
5985 if (tcp_can_ingest(sk, in_skb))
5986 return 0;
5987
5988 /* Collapsing did not help, destructive actions follow.
5989 * This must not ever occur. */
5990
5991 tcp_prune_ofo_queue(sk, in_skb);
5992
5993 if (tcp_can_ingest(sk, in_skb))
5994 return 0;
5995
5996 /* If we are really being abused, tell the caller to silently
5997 * drop receive data on the floor. It will get retransmitted
5998 * and hopefully then we'll have sufficient space.
5999 */
6000 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
6001
6002 /* Massive buffer overcommit. */
6003 tp->pred_flags = 0;
6004 return -1;
6005 }
6006
6007 static bool tcp_should_expand_sndbuf(struct sock *sk)
6008 {
6009 const struct tcp_sock *tp = tcp_sk(sk);
6010
6011 /* If the user specified a specific send buffer setting, do
6012 * not modify it.
6013 */
6014 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
6015 return false;
6016
6017 /* If we are under global TCP memory pressure, do not expand. */
6018 if (tcp_under_memory_pressure(sk)) {
6019 int unused_mem = sk_unused_reserved_mem(sk);
6020
6021 /* Adjust sndbuf according to reserved mem. But make sure
6022 * it never goes below SOCK_MIN_SNDBUF.
6023 * See sk_stream_moderate_sndbuf() for more details.
6024 */
6025 if (unused_mem > SOCK_MIN_SNDBUF)
6026 WRITE_ONCE(sk->sk_sndbuf, unused_mem);
6027
6028 return false;
6029 }
6030
6031 /* If we are under soft global TCP memory pressure, do not expand. */
6032 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
6033 return false;
6034
6035 /* If we filled the congestion window, do not expand. */
6036 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
6037 return false;
6038
6039 return true;
6040 }
6041
6042 static void tcp_new_space(struct sock *sk)
6043 {
6044 struct tcp_sock *tp = tcp_sk(sk);
6045
6046 if (tcp_should_expand_sndbuf(sk)) {
6047 tcp_sndbuf_expand(sk);
6048 tp->snd_cwnd_stamp = tcp_jiffies32;
6049 }
6050
6051 INDIRECT_CALL_1(READ_ONCE(sk->sk_write_space),
6052 sk_stream_write_space,
6053 sk);
6054 }
6055
6056 /* Caller made space either from:
6057 * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
6058 * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
6059 *
6060 * We might be able to generate EPOLLOUT to the application if:
6061 * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
6062 * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
6063 * small enough that tcp_stream_memory_free() decides it
6064 * is time to generate EPOLLOUT.
6065 */
6066 void __tcp_check_space(struct sock *sk)
6067 {
6068 tcp_new_space(sk);
6069 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
6070 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
6071 }
6072
6073 static inline void tcp_data_snd_check(struct sock *sk)
6074 {
6075 tcp_push_pending_frames(sk);
6076 tcp_check_space(sk);
6077 }
6078
6079 /*
6080 * Check if sending an ack is needed.
6081 */
6082 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
6083 {
6084 struct tcp_sock *tp = tcp_sk(sk);
6085 struct net *net = sock_net(sk);
6086 unsigned long rtt;
6087 u64 delay;
6088
6089 /* More than one full frame received... */
6090 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
6091 /* ... and right edge of window advances far enough.
6092 * (tcp_recvmsg() will send ACK otherwise).
6093 * If application uses SO_RCVLOWAT, we want send ack now if
6094 * we have not received enough bytes to satisfy the condition.
6095 */
6096 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
6097 __tcp_select_window(sk) >= tp->rcv_wnd)) ||
6098 /* We ACK each frame or... */
6099 tcp_in_quickack_mode(sk) ||
6100 /* Protocol state mandates a one-time immediate ACK */
6101 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
6102 /* If we are running from __release_sock() in user context,
6103 * Defer the ack until tcp_release_cb().
6104 */
6105 if (sock_owned_by_user_nocheck(sk) &&
6106 READ_ONCE(net->ipv4.sysctl_tcp_backlog_ack_defer)) {
6107 set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
6108 return;
6109 }
6110 send_now:
6111 tcp_send_ack(sk);
6112 return;
6113 }
6114
6115 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
6116 tcp_send_delayed_ack(sk);
6117 return;
6118 }
6119
6120 if (!tcp_is_sack(tp) ||
6121 tp->compressed_ack >= READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_nr))
6122 goto send_now;
6123
6124 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
6125 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
6126 tp->dup_ack_counter = 0;
6127 }
6128 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
6129 tp->dup_ack_counter++;
6130 goto send_now;
6131 }
6132 tp->compressed_ack++;
6133 if (hrtimer_is_queued(&tp->compressed_ack_timer))
6134 return;
6135
6136 /* compress ack timer : comp_sack_rtt_percent of rtt,
6137 * but no more than tcp_comp_sack_delay_ns.
6138 */
6139
6140 rtt = tp->rcv_rtt_est.rtt_us;
6141 if (tp->srtt_us && tp->srtt_us < rtt)
6142 rtt = tp->srtt_us;
6143
6144 /* delay = (rtt >> 3) * NSEC_PER_USEC * comp_sack_rtt_percent / 100
6145 * ->
6146 * delay = rtt * 1.25 * comp_sack_rtt_percent
6147 */
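	/* Worked example: rtt here is in usec left-shifted by 3, so a 1 ms
	 * RTT gives rtt == 8000; with a hypothetical
	 * tcp_comp_sack_rtt_percent of 33 this yields
	 * delay = 8000 * 1.25 * 33 == 330000 ns (330 usec), before the
	 * tcp_comp_sack_delay_ns cap below.
	 */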
6148 delay = (u64)(rtt + (rtt >> 2)) *
6149 READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_rtt_percent);
6150
6151 delay = min(delay, READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_delay_ns));
6152
6153 sock_hold(sk);
6154 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
6155 READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_slack_ns),
6156 HRTIMER_MODE_REL_PINNED_SOFT);
6157 }
6158
6159 static inline void tcp_ack_snd_check(struct sock *sk)
6160 {
6161 if (!inet_csk_ack_scheduled(sk)) {
6162 /* We sent a data segment already. */
6163 return;
6164 }
6165 __tcp_ack_snd_check(sk, 1);
6166 }
6167
6168 /*
6169 * This routine is only called when we have urgent data
6170 * signaled. It's the 'slow' part of tcp_urg. It could be
6171 * moved inline now as tcp_urg is only called from one
6172 * place. We handle URGent data wrong. We have to - as
6173 * BSD still doesn't use the correction from RFC961.
6174 * For 1003.1g we should support a new option TCP_STDURG to permit
6175 * either form (or just set the sysctl tcp_stdurg).
6176 */
6177
6178 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
6179 {
6180 struct tcp_sock *tp = tcp_sk(sk);
6181 u32 ptr = ntohs(th->urg_ptr);
6182
6183 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
6184 ptr--;
6185 ptr += ntohl(th->seq);
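	/* Example with hypothetical values: for th->seq == 1000 and
	 * th->urg_ptr == 5, the default path decrements and yields
	 * ptr == 1004 (compensating for the BSD convention of pointing one
	 * past the urgent byte), while with sysctl_tcp_stdurg set the
	 * pointer is taken as-is and ptr == 1005.
	 */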
6186
6187 /* Ignore urgent data that we've already seen and read. */
6188 if (after(tp->copied_seq, ptr))
6189 return;
6190
6191 /* Do not replay urg ptr.
6192 *
6193 * NOTE: interesting situation not covered by specs.
6194 * A misbehaving sender may send an urg ptr pointing into a segment
6195 * which we already have in the ofo queue. We are not able to fetch
6196 * such data and will stay in TCP_URG_NOTYET until it is eaten
6197 * by recvmsg(). It seems we are not obliged to handle such wicked
6198 * situations. But it is worth thinking about the possibility of some
6199 * DoSes using some hypothetical application-level deadlock.
6200 */
6201 if (before(ptr, tp->rcv_nxt))
6202 return;
6203
6204 /* Do we already have a newer (or duplicate) urgent pointer? */
6205 if (tp->urg_data && !after(ptr, tp->urg_seq))
6206 return;
6207
6208 /* Tell the world about our new urgent pointer. */
6209 sk_send_sigurg(sk);
6210
6211 /* We may be adding urgent data when the last byte read was
6212 * urgent. To do this requires some care. We cannot just ignore
6213 * tp->copied_seq since we would read the last urgent byte again
6214 * as data, nor can we alter copied_seq until this data arrives
6215 * or we break the semantics of SIOCATMARK (and thus sockatmark())
6216 *
6217 * NOTE. Double Dutch. Rendering to plain English: the author of the comment
6218 * above did something like send("A", MSG_OOB); send("B", MSG_OOB);
6219 * and expected that both A and B disappear from the stream. This is _wrong_.
6220 * Though this happens in BSD with high probability, it is only accidental.
6221 * Any application relying on this is buggy. Note also that the fix "works"
6222 * only in this artificial test. Insert some normal data between A and B and we
6223 * will diverge from BSD again. Verdict: it is better to remove this in order
6224 * to trap buggy users.
6225 */
6226 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
6227 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
6228 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
6229 tp->copied_seq++;
6230 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
6231 __skb_unlink(skb, &sk->sk_receive_queue);
6232 __kfree_skb(skb);
6233 }
6234 }
6235
6236 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
6237 WRITE_ONCE(tp->urg_seq, ptr);
6238
6239 /* Disable header prediction. */
6240 tp->pred_flags = 0;
6241 }
6242
6243 /* This is the 'fast' part of urgent handling. */
6244 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
6245 {
6246 struct tcp_sock *tp = tcp_sk(sk);
6247
6248 /* Check if we get a new urgent pointer - normally not. */
6249 if (unlikely(th->urg))
6250 tcp_check_urg(sk, th);
6251
6252 /* Do we wait for any urgent data? - normally not... */
6253 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
6254 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
6255 th->syn;
6256
6257 /* Is the urgent pointer pointing into this packet? */
6258 if (ptr < skb->len) {
6259 u8 tmp;
6260 if (skb_copy_bits(skb, ptr, &tmp, 1))
6261 BUG();
6262 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
6263 if (!sock_flag(sk, SOCK_DEAD))
6264 READ_ONCE(sk->sk_data_ready)(sk);
6265 }
6266 }
6267 }
6268
6269 /* Accept RST for rcv_nxt - 1 after a FIN.
6270 * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
6271 * FIN is sent followed by a RST packet. The RST is sent with the same
6272 * sequence number as the FIN, and thus according to RFC 5961 a challenge
6273 * ACK should be sent. However, Mac OSX rate limits replies to challenge
6274 * ACKs on the closed socket. In addition middleboxes can drop either the
6275 * challenge ACK or a subsequent RST.
6276 */
6277 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
6278 {
6279 const struct tcp_sock *tp = tcp_sk(sk);
6280
6281 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
6282 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
6283 TCPF_CLOSING));
6284 }
6285
6286 /* Does PAWS and seqno based validation of an incoming segment, flags will
6287 * play significant role here.
6288 */
6289 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
6290 const struct tcphdr *th, int syn_inerr)
6291 {
6292 struct tcp_sock *tp = tcp_sk(sk);
6293 bool accecn_reflector = false;
6294 SKB_DR(reason);
6295
6296 /* RFC1323: H1. Apply PAWS check first. */
6297 if (!tcp_fast_parse_options(sock_net(sk), skb, th, tp) ||
6298 !tp->rx_opt.saw_tstamp ||
6299 tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW))
6300 goto step1;
6301
6302 reason = tcp_disordered_ack_check(sk, skb);
6303 if (!reason)
6304 goto step1;
6305 /* Reset is accepted even if it did not pass PAWS. */
6306 if (th->rst)
6307 goto step1;
6308 if (unlikely(th->syn))
6309 goto syn_challenge;
6310
6311 /* Old ACKs are common, increment PAWS_OLD_ACK
6312 * and do not send a dupack.
6313 */
6314 if (reason == SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK) {
6315 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWS_OLD_ACK);
6316 goto discard;
6317 }
6318 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
6319 if (!tcp_oow_rate_limited(sock_net(sk), skb,
6320 LINUX_MIB_TCPACKSKIPPEDPAWS,
6321 &tp->last_oow_ack_time))
6322 tcp_send_dupack(sk, skb);
6323 goto discard;
6324
6325 step1:
6326 /* Step 1: check sequence number */
6327 reason = tcp_sequence(sk, TCP_SKB_CB(skb)->seq,
6328 TCP_SKB_CB(skb)->end_seq, th);
6329 if (reason) {
6330 /* RFC793, page 37: "In all states except SYN-SENT, all reset
6331 * (RST) segments are validated by checking their SEQ-fields."
6332 * And page 69: "If an incoming segment is not acceptable,
6333 * an acknowledgment should be sent in reply (unless the RST
6334 * bit is set, if so drop the segment and return)".
6335 */
6336 if (!th->rst) {
6337 if (th->syn)
6338 goto syn_challenge;
6339
6340 if (reason == SKB_DROP_REASON_TCP_INVALID_SEQUENCE ||
6341 reason == SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE)
6342 NET_INC_STATS(sock_net(sk),
6343 LINUX_MIB_BEYOND_WINDOW);
6344 if (!tcp_oow_rate_limited(sock_net(sk), skb,
6345 LINUX_MIB_TCPACKSKIPPEDSEQ,
6346 &tp->last_oow_ack_time))
6347 tcp_send_dupack(sk, skb);
6348 } else if (tcp_reset_check(sk, skb)) {
6349 goto reset;
6350 }
6351 goto discard;
6352 }
6353
6354 /* Step 2: check RST bit */
6355 if (th->rst) {
6356 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
6357 * FIN and SACK too if available):
6358 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
6359 * the right-most SACK block,
6360 * then
6361 * RESET the connection
6362 * else
6363 * Send a challenge ACK
6364 */
6365 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
6366 tcp_reset_check(sk, skb))
6367 goto reset;
6368
6369 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
6370 struct tcp_sack_block *sp = &tp->selective_acks[0];
6371 int max_sack = sp[0].end_seq;
6372 int this_sack;
6373
6374 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
6375 ++this_sack) {
6376 max_sack = after(sp[this_sack].end_seq,
6377 max_sack) ?
6378 sp[this_sack].end_seq : max_sack;
6379 }
6380
6381 if (TCP_SKB_CB(skb)->seq == max_sack)
6382 goto reset;
6383 }
6384
6385 /* Disable TFO if RST is out-of-order
6386 * and no data has been received
6387 * for current active TFO socket
6388 */
6389 if (tp->syn_fastopen && !tp->data_segs_in &&
6390 sk->sk_state == TCP_ESTABLISHED)
6391 tcp_fastopen_active_disable(sk);
6392 tcp_send_challenge_ack(sk, false);
6393 SKB_DR_SET(reason, TCP_RESET);
6394 goto discard;
6395 }
6396
6397 /* step 3: check security and precedence [ignored] */
6398
6399 /* step 4: Check for a SYN
6400 * RFC 5961 4.2 : Send a challenge ack
6401 */
6402 if (th->syn) {
6403 if (tcp_ecn_mode_accecn(tp)) {
6404 accecn_reflector = true;
6405 tp->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
6406 INET_ECN_MASK;
6407 if (tp->rx_opt.accecn &&
6408 tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
6409 u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
6410
6411 tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
6412 tcp_accecn_opt_demand_min(sk, 1);
6413 }
6414 }
6415 if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
6416 TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
6417 TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
6418 TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
6419 goto pass;
6420 syn_challenge:
6421 if (syn_inerr)
6422 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6423 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
6424 tcp_send_challenge_ack(sk, accecn_reflector);
6425 SKB_DR_SET(reason, TCP_INVALID_SYN);
6426 goto discard;
6427 }
6428
6429 pass:
6430 bpf_skops_parse_hdr(sk, skb);
6431
6432 return true;
6433
6434 discard:
6435 tcp_drop_reason(sk, skb, reason);
6436 return false;
6437
6438 reset:
6439 tcp_reset(sk, skb);
6440 __kfree_skb(skb);
6441 return false;
6442 }
6443
6444 /*
6445 * TCP receive function for the ESTABLISHED state.
6446 *
6447 * It is split into a fast path and a slow path. The fast path is
6448 * disabled when:
6449 * - A zero window was announced from us - zero window probing
6450 * is only handled properly in the slow path.
6451 * - Out of order segments arrived.
6452 * - Urgent data is expected.
6453 * - There is no buffer space left
6454 * - Unexpected TCP flags/window values/header lengths are received
6455 * (detected by checking the TCP header against pred_flags)
6456 * - Data is sent in both directions. Fast path only supports pure senders
6457 * or pure receivers (this means either the sequence number or the ack
6458 * value must stay constant)
6459 * - Unexpected TCP option.
6460 *
6461 * When these conditions are not satisfied it drops into a standard
6462 * receive procedure patterned after RFC793 to handle all cases.
6463 * The first three cases are guaranteed by proper pred_flags setting,
6464 * the rest is checked inline. Fast processing is turned on in
6465 * tcp_data_queue when everything is OK.
6466 */
6467 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
6468 {
6469 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
6470 const struct tcphdr *th = (const struct tcphdr *)skb->data;
6471 struct tcp_sock *tp = tcp_sk(sk);
6472 unsigned int len = skb->len;
6473
6474 /* TCP congestion window tracking */
6475 trace_tcp_probe(sk, skb);
6476
6477 tcp_mstamp_refresh(tp);
6478 if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
6479 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
6480 /*
6481 * Header prediction.
6482 * The code loosely follows the one in the famous
6483 * "30 instruction TCP receive" Van Jacobson mail.
6484 *
6485 * Van's trick is to deposit buffers into socket queue
6486 * on a device interrupt, to call tcp_recv function
6487 * on the receive process context and checksum and copy
6488 * the buffer to user space. smart...
6489 *
6490 * Our current scheme is not silly either but we take the
6491 * extra cost of the net_bh soft interrupt processing...
6492 * We do checksum and copy also but from device to kernel.
6493 */
6494
6495 tp->rx_opt.saw_tstamp = 0;
6496 tp->rx_opt.accecn = 0;
6497
6498 /* pred_flags is 0xS?10 << 16 + snd_wnd
6499 * if header_prediction is to be made
6500 * 'S' will always be tp->tcp_header_len >> 2
6501 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
6502 * turn it off (when there are holes in the receive
6503 * space for instance)
6504 * PSH flag is ignored.
6505 */
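/* Worked example (illustrative, not normative): with timestamps negotiated,
 * tcp_header_len is 32 so S == 8; a predicted segment carrying only the ACK
 * flag and an unchanged window of 0x1234 therefore presents the flag word
 * 0x80101234, which the single 32-bit comparison below accepts.
 */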
6506
6507 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
6508 TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
6509 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6510 int tcp_header_len = tp->tcp_header_len;
6511 s32 delta = 0;
6512 int flag = 0;
6513
6514 /* Timestamp header prediction: tcp_header_len
6515 * is automatically equal to th->doff*4 due to pred_flags
6516 * match.
6517 */
6518
6519 /* Check timestamp */
6520 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
6521 /* No? Slow path! */
6522 if (!tcp_parse_aligned_timestamp(tp, th))
6523 goto slow_path;
6524
6525 delta = tp->rx_opt.rcv_tsval -
6526 tp->rx_opt.ts_recent;
6527 /* If PAWS failed, check it more carefully in slow path */
6528 if (delta < 0)
6529 goto slow_path;
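/* For example (illustrative): rcv_tsval == 1000 with ts_recent == 1005
 * gives delta == -5, so PAWS may have failed and the segment is
 * re-validated on the slow path instead.
 */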
6530
6531 /* DO NOT update ts_recent here: if the checksum fails
6532 * and the timestamp was the corrupted part, it would result
6533 * in a hung connection since we would drop all
6534 * future packets due to the PAWS test.
6535 */
6536 }
6537
6538 if (len <= tcp_header_len) {
6539 /* Bulk data transfer: sender */
6540 if (len == tcp_header_len) {
6541 /* Predicted packet is in window by definition.
6542 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6543 * Hence, check seq<=rcv_wup reduces to:
6544 */
6545 if (tcp_header_len ==
6546 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6547 tp->rcv_nxt == tp->rcv_wup)
6548 flag |= __tcp_replace_ts_recent(tp,
6549 delta);
6550
6551 tcp_ecn_received_counters(sk, skb, 0);
6552
6553 /* We know that such packets are checksummed
6554 * on entry.
6555 */
6556 tcp_ack(sk, skb, flag);
6557 __kfree_skb(skb);
6558 tcp_data_snd_check(sk);
6559 /* When receiving pure ack in fast path, update
6560 * last ts ecr directly instead of calling
6561 * tcp_rcv_rtt_measure_ts()
6562 */
6563 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
6564 return;
6565 } else { /* Header too small */
6566 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
6567 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6568 goto discard;
6569 }
6570 } else {
6571 int eaten = 0;
6572 bool fragstolen = false;
6573
6574 if (tcp_checksum_complete(skb))
6575 goto csum_error;
6576
6577 if (after(TCP_SKB_CB(skb)->end_seq,
6578 tp->rcv_nxt + tcp_receive_window(tp)))
6579 goto validate;
6580
6581 if ((int)skb->truesize > sk->sk_forward_alloc)
6582 goto step5;
6583
6584 /* Predicted packet is in window by definition.
6585 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6586 * Hence, check seq<=rcv_wup reduces to:
6587 */
6588 if (tcp_header_len ==
6589 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6590 tp->rcv_nxt == tp->rcv_wup)
6591 flag |= __tcp_replace_ts_recent(tp,
6592 delta);
6593
6594 tcp_rcv_rtt_measure_ts(sk, skb);
6595
6596 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
6597
6598 /* Bulk data transfer: receiver */
6599 tcp_cleanup_skb(skb);
6600 __skb_pull(skb, tcp_header_len);
6601 tcp_ecn_received_counters(sk, skb,
6602 len - tcp_header_len);
6603 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
6604
6605 tcp_event_data_recv(sk, skb);
6606
6607 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
6608 /* Well, only one small jumplet in fast path... */
6609 tcp_ack(sk, skb, flag | FLAG_DATA);
6610 tcp_data_snd_check(sk);
6611 if (!inet_csk_ack_scheduled(sk))
6612 goto no_ack;
6613 } else {
6614 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
6615 }
6616
6617 __tcp_ack_snd_check(sk, 0);
6618 no_ack:
6619 if (eaten)
6620 kfree_skb_partial(skb, fragstolen);
6621 tcp_data_ready(sk);
6622 return;
6623 }
6624 }
6625
6626 slow_path:
6627 if (len < (th->doff << 2) || tcp_checksum_complete(skb))
6628 goto csum_error;
6629
6630 if (!th->ack && !th->rst && !th->syn) {
6631 reason = SKB_DROP_REASON_TCP_FLAGS;
6632 goto discard;
6633 }
6634
6635 /*
6636 * Standard slow path.
6637 */
6638 validate:
6639 if (!tcp_validate_incoming(sk, skb, th, 1))
6640 return;
6641
6642 step5:
6643 tcp_ecn_received_counters_payload(sk, skb);
6644
6645 reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
6646 if ((int)reason < 0) {
6647 reason = -reason;
6648 goto discard;
6649 }
6650 tcp_rcv_rtt_measure_ts(sk, skb);
6651
6652 /* Process urgent data. */
6653 tcp_urg(sk, skb, th);
6654
6655 /* step 7: process the segment text */
6656 tcp_data_queue(sk, skb);
6657
6658 tcp_data_snd_check(sk);
6659 tcp_ack_snd_check(sk);
6660 return;
6661
6662 csum_error:
6663 reason = SKB_DROP_REASON_TCP_CSUM;
6664 trace_tcp_bad_csum(skb);
6665 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
6666 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6667
6668 discard:
6669 tcp_drop_reason(sk, skb, reason);
6670 }
6671
6672 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
6673 {
6674 struct inet_connection_sock *icsk = inet_csk(sk);
6675 struct tcp_sock *tp = tcp_sk(sk);
6676
6677 tcp_mtup_init(sk);
6678 icsk->icsk_af_ops->rebuild_header(sk);
6679 tcp_init_metrics(sk);
6680
6681 /* Initialize the congestion window to start the transfer.
6682 * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
6683 * retransmitted. In light of RFC6298 more aggressive 1sec
6684 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
6685 * retransmission has occurred.
6686 */
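/* Example (assuming the common TCP_INIT_CWND of 10): one retransmitted
 * SYN keeps the full initial window, while a second retransmission makes
 * total_retrans > 1 and, with undo_marker set, collapses cwnd to 1 below.
 */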
6687 if (tp->total_retrans > 1 && tp->undo_marker)
6688 tcp_snd_cwnd_set(tp, 1);
6689 else
6690 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
6691 tp->snd_cwnd_stamp = tcp_jiffies32;
6692
6693 bpf_skops_established(sk, bpf_op, skb);
6694 /* Initialize congestion control unless BPF initialized it already: */
6695 if (!icsk->icsk_ca_initialized)
6696 tcp_init_congestion_control(sk);
6697 tcp_init_buffer_space(sk);
6698 }
6699
6700 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
6701 {
6702 struct tcp_sock *tp = tcp_sk(sk);
6703 struct inet_connection_sock *icsk = inet_csk(sk);
6704
6705 tcp_ao_finish_connect(sk, skb);
6706 tcp_set_state(sk, TCP_ESTABLISHED);
6707 icsk->icsk_ack.lrcvtime = tcp_jiffies32;
6708
6709 if (skb) {
6710 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
6711 security_inet_conn_established(sk, skb);
6712 sk_mark_napi_id(sk, skb);
6713 }
6714
6715 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb);
6716
6717 /* Prevent spurious tcp_cwnd_restart() on first data
6718 * packet.
6719 */
6720 tp->lsndtime = tcp_jiffies32;
6721
6722 if (sock_flag(sk, SOCK_KEEPOPEN))
6723 tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
6724
6725 if (!tp->rx_opt.snd_wscale)
6726 __tcp_fast_path_on(tp, tp->snd_wnd);
6727 else
6728 tp->pred_flags = 0;
6729 }
6730
6731 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
6732 struct tcp_fastopen_cookie *cookie)
6733 {
6734 struct tcp_sock *tp = tcp_sk(sk);
6735 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
6736 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
6737 bool syn_drop = false;
6738
6739 if (mss == READ_ONCE(tp->rx_opt.user_mss)) {
6740 struct tcp_options_received opt;
6741
6742 /* Get original SYNACK MSS value if user MSS sets mss_clamp */
6743 tcp_clear_options(&opt);
6744 opt.user_mss = opt.mss_clamp = 0;
6745 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
6746 mss = opt.mss_clamp;
6747 }
6748
6749 if (!tp->syn_fastopen) {
6750 /* Ignore an unsolicited cookie */
6751 cookie->len = -1;
6752 } else if (tp->total_retrans) {
6753 /* SYN timed out and the SYN-ACK neither has a cookie nor
6754 * acknowledges data. Presumably the remote received only
6755 * the retransmitted (regular) SYNs: either the original
6756 * SYN-data or the corresponding SYN-ACK was dropped.
6757 */
6758 syn_drop = (cookie->len < 0 && data);
6759 } else if (cookie->len < 0 && !tp->syn_data) {
6760 /* We requested a cookie but didn't get it. If we did not use
6761 * the (old) exp opt format then try so next time (try_exp=1).
6762 * Otherwise we go back to use the RFC7413 opt (try_exp=2).
6763 */
6764 try_exp = tp->syn_fastopen_exp ? 2 : 1;
6765 }
6766
6767 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
6768
6769 if (data) { /* Retransmit unacked data in SYN */
6770 if (tp->total_retrans)
6771 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED;
6772 else
6773 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
6774 skb_rbtree_walk_from(data)
6775 tcp_mark_skb_lost(sk, data);
6776 tcp_non_congestion_loss_retransmit(sk);
6777 NET_INC_STATS(sock_net(sk),
6778 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
6779 return true;
6780 }
6781 tp->syn_data_acked = tp->syn_data;
6782 if (tp->syn_data_acked) {
6783 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
6784 /* SYN-data is counted as two separate packets in tcp_ack() */
6785 if (tp->delivered > 1)
6786 WRITE_ONCE(tp->delivered, tp->delivered - 1);
6787 }
6788
6789 tcp_fastopen_add_skb(sk, synack);
6790
6791 return false;
6792 }
6793
6794 static void smc_check_reset_syn(struct tcp_sock *tp)
6795 {
6796 #if IS_ENABLED(CONFIG_SMC)
6797 if (static_branch_unlikely(&tcp_have_smc)) {
6798 if (tp->syn_smc && !tp->rx_opt.smc_ok)
6799 tp->syn_smc = 0;
6800 }
6801 #endif
6802 }
6803
6804 static void tcp_try_undo_spurious_syn(struct sock *sk)
6805 {
6806 struct tcp_sock *tp = tcp_sk(sk);
6807 u32 syn_stamp;
6808
6809 /* undo_marker is set when SYN or SYNACK times out. The timeout is
6810 * spurious if the ACK's timestamp option echo value matches the
6811 * original SYN timestamp.
6812 */
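/* Illustrative reading: retrans_stamp holds the tsval sent in the original
 * SYN; if the peer echoes exactly that value in rcv_tsecr, the original SYN
 * did arrive, the retransmission was spurious, and undo_marker is cleared.
 */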
6813 syn_stamp = tp->retrans_stamp;
6814 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp &&
6815 syn_stamp == tp->rx_opt.rcv_tsecr)
6816 tp->undo_marker = 0;
6817 }
6818
6819 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
6820 const struct tcphdr *th)
6821 {
6822 struct inet_connection_sock *icsk = inet_csk(sk);
6823 struct tcp_sock *tp = tcp_sk(sk);
6824 struct tcp_fastopen_cookie foc = { .len = -1 };
6825 int saved_clamp = tp->rx_opt.mss_clamp;
6826 bool fastopen_fail;
6827 SKB_DR(reason);
6828
6829 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
6830 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
6831 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
6832
6833 if (th->ack) {
6834 /* rfc793:
6835 * "If the state is SYN-SENT then
6836 * first check the ACK bit
6837 * If the ACK bit is set
6838 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
6839 * a reset (unless the RST bit is set, if so drop
6840 * the segment and return)"
6841 */
6842 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
6843 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6844 /* Previous FIN/ACK or RST/ACK might be ignored. */
6845 if (icsk->icsk_retransmits == 0)
6846 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
6847 TCP_TIMEOUT_MIN, false);
6848 SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
6849 goto reset_and_undo;
6850 }
6851
6852 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
6853 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
6854 tcp_time_stamp_ts(tp))) {
6855 NET_INC_STATS(sock_net(sk),
6856 LINUX_MIB_PAWSACTIVEREJECTED);
6857 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6858 goto reset_and_undo;
6859 }
6860
6861 /* Now ACK is acceptable.
6862 *
6863 * "If the RST bit is set
6864 * If the ACK was acceptable then signal the user "error:
6865 * connection reset", drop the segment, enter CLOSED state,
6866 * delete TCB, and return."
6867 */
6868
6869 if (th->rst) {
6870 tcp_reset(sk, skb);
6871 consume:
6872 __kfree_skb(skb);
6873 return 0;
6874 }
6875
6876 /* rfc793:
6877 * "fifth, if neither of the SYN or RST bits is set then
6878 * drop the segment and return."
6879 *
6880 * See note below!
6881 * --ANK(990513)
6882 */
6883 if (!th->syn) {
6884 SKB_DR_SET(reason, TCP_FLAGS);
6885 goto discard_and_undo;
6886 }
6887 /* rfc793:
6888 * "If the SYN bit is on ...
6889 * are acceptable then ...
6890 * (our SYN has been ACKed), change the connection
6891 * state to ESTABLISHED..."
6892 */
6893
6894 if (tcp_ecn_mode_any(tp))
6895 tcp_ecn_rcv_synack(sk, skb, th,
6896 TCP_SKB_CB(skb)->ip_dsfield);
6897
6898 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
6899 tcp_try_undo_spurious_syn(sk);
6900 tcp_ack(sk, skb, FLAG_SLOWPATH);
6901
6902 /* Ok.. it's good. Set up sequence numbers and
6903 * move to established.
6904 */
6905 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6906 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6907 tp->rcv_mwnd_seq = tp->rcv_wup + tp->rcv_wnd;
6908
6909 /* RFC1323: The window in SYN & SYN/ACK segments is
6910 * never scaled.
6911 */
6912 tp->snd_wnd = ntohs(th->window);
6913
6914 if (!tp->rx_opt.wscale_ok) {
6915 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
6916 WRITE_ONCE(tp->window_clamp,
6917 min(tp->window_clamp, 65535U));
6918 }
6919
6920 if (tp->rx_opt.saw_tstamp) {
6921 tp->rx_opt.tstamp_ok = 1;
6922 tp->tcp_header_len =
6923 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6924 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6925 tcp_store_ts_recent(tp);
6926 } else {
6927 tp->tcp_header_len = sizeof(struct tcphdr);
6928 }
6929
6930 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6931 tcp_initialize_rcv_mss(sk);
6932
6933 /* Remember, tcp_poll() does not lock socket!
6934 * Change state from SYN-SENT only after copied_seq
6935 * is initialized. */
6936 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6937
6938 smc_check_reset_syn(tp);
6939
6940 smp_mb();
6941
6942 tcp_finish_connect(sk, skb);
6943
6944 fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
6945 tcp_rcv_fastopen_synack(sk, skb, &foc);
6946
6947 if (!sock_flag(sk, SOCK_DEAD)) {
6948 sk->sk_state_change(sk);
6949 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6950 }
6951 if (fastopen_fail)
6952 return -1;
6953 if (sk->sk_write_pending ||
6954 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||
6955 inet_csk_in_pingpong_mode(sk)) {
6956 /* Save one ACK. Data will be ready after
6957 * several ticks, if write_pending is set.
6958 *
6959 * It may be deleted, but with this feature tcpdumps
6960 * look so _wonderfully_ clever, that I was not able
6961 * to stand against the temptation 8) --ANK
6962 */
6963 inet_csk_schedule_ack(sk);
6964 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6965 tcp_reset_xmit_timer(sk, ICSK_TIME_DACK,
6966 TCP_DELACK_MAX, false);
6967 goto consume;
6968 }
6969 tcp_send_ack_reflect_ect(sk, tcp_ecn_mode_accecn(tp));
6970 return -1;
6971 }
6972
6973 /* No ACK in the segment */
6974
6975 if (th->rst) {
6976 /* rfc793:
6977 * "If the RST bit is set
6978 *
6979 * Otherwise (no ACK) drop the segment and return."
6980 */
6981 SKB_DR_SET(reason, TCP_RESET);
6982 goto discard_and_undo;
6983 }
6984
6985 /* PAWS check. */
6986 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
6987 tcp_paws_reject(&tp->rx_opt, 0)) {
6988 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6989 goto discard_and_undo;
6990 }
6991 if (th->syn) {
6992 /* We see a SYN without an ACK. It is an attempt at
6993 * simultaneous connect with crossed SYNs.
6994 * In particular, it can be a connect to self.
6995 */
6996 #ifdef CONFIG_TCP_AO
6997 struct tcp_ao_info *ao;
6998
6999 ao = rcu_dereference_protected(tp->ao_info,
7000 lockdep_sock_is_held(sk));
7001 if (ao) {
7002 WRITE_ONCE(ao->risn, th->seq);
7003 ao->rcv_sne = 0;
7004 }
7005 #endif
7006 tcp_set_state(sk, TCP_SYN_RECV);
7007
7008 if (tp->rx_opt.saw_tstamp) {
7009 tp->rx_opt.tstamp_ok = 1;
7010 tcp_store_ts_recent(tp);
7011 tp->tcp_header_len =
7012 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
7013 } else {
7014 tp->tcp_header_len = sizeof(struct tcphdr);
7015 }
7016
7017 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
7018 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
7019 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
7020 tp->rcv_mwnd_seq = tp->rcv_wup + tp->rcv_wnd;
7021
7022 /* RFC1323: The window in SYN & SYN/ACK segments is
7023 * never scaled.
7024 */
7025 tp->snd_wnd = ntohs(th->window);
7026 tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
7027 tp->max_window = tp->snd_wnd;
7028
7029 tcp_ecn_rcv_syn(sk, th, skb);
7030
7031 tcp_mtup_init(sk);
7032 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
7033 tcp_initialize_rcv_mss(sk);
7034
7035 tcp_send_synack(sk);
7036 #if 0
7037 /* Note, we could accept data and URG from this segment.
7038 * There are no obstacles to make this (except that we must
7039 * either change tcp_recvmsg() to prevent it from returning data
7040 * before 3WHS completes per RFC793, or employ TCP Fast Open).
7041 *
7042 * However, if we sometimes ignore data in ACKless segments,
7043 * we have no reason to accept it at other times.
7044 * Also, the code doing this in step6 of tcp_rcv_state_process
7045 * does not seem flawless. So, discard the packet for sanity.
7046 * Uncomment this return to process the data.
7047 */
7048 return -1;
7049 #else
7050 goto consume;
7051 #endif
7052 }
7053 /* "fifth, if neither of the SYN or RST bits is set then
7054 * drop the segment and return."
7055 */
7056
7057 discard_and_undo:
7058 tcp_clear_options(&tp->rx_opt);
7059 tp->rx_opt.mss_clamp = saved_clamp;
7060 tcp_drop_reason(sk, skb, reason);
7061 return 0;
7062
7063 reset_and_undo:
7064 tcp_clear_options(&tp->rx_opt);
7065 tp->rx_opt.mss_clamp = saved_clamp;
7066 /* we can reuse/return @reason to its caller to handle the exception */
7067 return reason;
7068 }
7069
7070 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
7071 {
7072 struct tcp_sock *tp = tcp_sk(sk);
7073 struct request_sock *req;
7074
7075 /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
7076 * undo. If peer SACKs triggered fast recovery, we can't undo here.
7077 */
7078 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
7079 tcp_try_undo_recovery(sk);
7080
7081 tcp_update_rto_time(tp);
7082 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
7083 /* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
7084 * retrans_stamp but don't enter CA_Loss, so in case that happened we
7085 * need to zero retrans_stamp here to prevent spurious
7086 * retransmits_timed_out(). However, if the ACK of our SYNACK caused us
7087 * to enter CA_Recovery then we need to leave retrans_stamp as it was
7088 * set entering CA_Recovery, for correct retransmits_timed_out() and
7089 * undo behavior.
7090 */
7091 tcp_retrans_stamp_cleanup(sk);
7092
7093 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
7094 * we no longer need req so release it.
7095 */
7096 req = rcu_dereference_protected(tp->fastopen_rsk,
7097 lockdep_sock_is_held(sk));
7098 reqsk_fastopen_remove(sk, req, false);
7099
7100 /* Re-arm the timer because data may have been sent out.
7101 * This is similar to the regular data transmission case
7102 * when new data has just been ack'ed.
7103 *
7104 * (TFO) - we could try to be more aggressive and
7105 * retransmit any data sooner based on when it
7106 * was sent out.
7107 */
7108 tcp_rearm_rto(sk);
7109 }
7110
7111 /*
7112 * This function implements the receiving procedure of RFC 793 for
7113 * all states except ESTABLISHED and TIME_WAIT.
7114 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
7115 * address independent.
7116 */
7117
7118 enum skb_drop_reason
7119 tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
7120 {
7121 struct tcp_sock *tp = tcp_sk(sk);
7122 struct inet_connection_sock *icsk = inet_csk(sk);
7123 const struct tcphdr *th = tcp_hdr(skb);
7124 struct request_sock *req;
7125 int queued = 0;
7126 SKB_DR(reason);
7127
7128 switch (sk->sk_state) {
7129 case TCP_CLOSE:
7130 SKB_DR_SET(reason, TCP_CLOSE);
7131 goto discard;
7132
7133 case TCP_LISTEN:
7134 if (th->ack)
7135 return SKB_DROP_REASON_TCP_FLAGS;
7136
7137 if (th->rst) {
7138 SKB_DR_SET(reason, TCP_RESET);
7139 goto discard;
7140 }
7141 if (th->syn) {
7142 if (th->fin) {
7143 SKB_DR_SET(reason, TCP_FLAGS);
7144 goto discard;
7145 }
7146 /* It is possible that we process SYN packets from the backlog,
7147 * so we need to make sure to disable BH and take the RCU read lock right here.
7148 */
7149 rcu_read_lock();
7150 local_bh_disable();
7151 icsk->icsk_af_ops->conn_request(sk, skb);
7152 local_bh_enable();
7153 rcu_read_unlock();
7154
7155 consume_skb(skb);
7156 return 0;
7157 }
7158 SKB_DR_SET(reason, TCP_FLAGS);
7159 goto discard;
7160
7161 case TCP_SYN_SENT:
7162 tp->rx_opt.saw_tstamp = 0;
7163 tcp_mstamp_refresh(tp);
7164 queued = tcp_rcv_synsent_state_process(sk, skb, th);
7165 if (queued >= 0)
7166 return queued;
7167
7168 /* Do step6 onward by hand. */
7169 tcp_urg(sk, skb, th);
7170 __kfree_skb(skb);
7171 tcp_data_snd_check(sk);
7172 return 0;
7173 }
7174
7175 tcp_mstamp_refresh(tp);
7176 tp->rx_opt.saw_tstamp = 0;
7177 req = rcu_dereference_protected(tp->fastopen_rsk,
7178 lockdep_sock_is_held(sk));
7179 if (req) {
7180 bool req_stolen;
7181
7182 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
7183 sk->sk_state != TCP_FIN_WAIT1);
7184
7185 SKB_DR_SET(reason, TCP_FASTOPEN);
7186 if (!tcp_check_req(sk, skb, req, true, &req_stolen, &reason))
7187 goto discard;
7188 }
7189
7190 if (!th->ack && !th->rst && !th->syn) {
7191 SKB_DR_SET(reason, TCP_FLAGS);
7192 goto discard;
7193 }
7194 if (!tcp_validate_incoming(sk, skb, th, 0))
7195 return 0;
7196
7197 /* step 5: check the ACK field */
7198 reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
7199 FLAG_UPDATE_TS_RECENT |
7200 FLAG_NO_CHALLENGE_ACK);
7201
7202 if ((int)reason <= 0) {
7203 if (sk->sk_state == TCP_SYN_RECV) {
7204 /* send one RST */
7205 if (!reason)
7206 return SKB_DROP_REASON_TCP_OLD_ACK;
7207 return -reason;
7208 }
7209 /* accept old ack during closing */
7210 if ((int)reason < 0) {
7211 tcp_send_challenge_ack(sk, false);
7212 reason = -reason;
7213 goto discard;
7214 }
7215 }
7216 SKB_DR_SET(reason, NOT_SPECIFIED);
7217 switch (sk->sk_state) {
7218 case TCP_SYN_RECV:
7219 WRITE_ONCE(tp->delivered, tp->delivered + 1); /* SYN-ACK delivery isn't tracked in tcp_ack */
7220 if (!tp->srtt_us)
7221 tcp_synack_rtt_meas(sk, req);
7222
7223 if (tp->rx_opt.tstamp_ok)
7224 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
7225
7226 if (req) {
7227 tcp_rcv_synrecv_state_fastopen(sk);
7228 } else {
7229 tcp_try_undo_spurious_syn(sk);
7230 tp->retrans_stamp = 0;
7231 tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,
7232 skb);
7233 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
7234 }
7235 tcp_ao_established(sk);
7236 smp_mb();
7237 tcp_set_state(sk, TCP_ESTABLISHED);
7238 sk->sk_state_change(sk);
7239
7240 /* Note that this wakeup is only for the marginal crossed-SYN case.
7241 * Passively open sockets are not woken up, because
7242 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
7243 */
7244 if (sk->sk_socket)
7245 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
7246
7247 WRITE_ONCE(tp->snd_una, TCP_SKB_CB(skb)->ack_seq);
7248 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
7249 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
7250
7251 if (!inet_csk(sk)->icsk_ca_ops->cong_control)
7252 tcp_update_pacing_rate(sk);
7253
7254 /* Prevent spurious tcp_cwnd_restart() on first data packet */
7255 tp->lsndtime = tcp_jiffies32;
7256
7257 tcp_initialize_rcv_mss(sk);
7258 if (tcp_ecn_mode_accecn(tp))
7259 tcp_accecn_third_ack(sk, skb, tp->syn_ect_snt);
7260 tcp_fast_path_on(tp);
7261 if (sk->sk_shutdown & SEND_SHUTDOWN)
7262 tcp_shutdown(sk, SEND_SHUTDOWN);
7263
7264 break;
7265
7266 case TCP_FIN_WAIT1: {
7267 int tmo;
7268
7269 if (req)
7270 tcp_rcv_synrecv_state_fastopen(sk);
7271
7272 if (tp->snd_una != tp->write_seq)
7273 break;
7274
7275 tcp_set_state(sk, TCP_FIN_WAIT2);
7276 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
7277
7278 sk_dst_confirm(sk);
7279
7280 if (!sock_flag(sk, SOCK_DEAD)) {
7281 /* Wake up lingering close() */
7282 sk->sk_state_change(sk);
7283 break;
7284 }
7285
7286 if (READ_ONCE(tp->linger2) < 0) {
7287 tcp_done(sk);
7288 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7289 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7290 }
7291 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
7292 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
7293 /* Receive out of order FIN after close() */
7294 if (tp->syn_fastopen && th->fin)
7295 tcp_fastopen_active_disable(sk);
7296 tcp_done(sk);
7297 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7298 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7299 }
7300
7301 tmo = tcp_fin_time(sk);
7302 if (tmo > TCP_TIMEWAIT_LEN) {
7303 tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
7304 } else if (th->fin || sock_owned_by_user(sk)) {
7305 /* Bad case. We could lose such a FIN otherwise.
7306 * It is not a big problem, but it looks confusing
7307 * and is not such a rare event. We can still lose it now,
7308 * if it spins in bh_lock_sock(), but that is really a
7309 * marginal case.
7310 */
7311 tcp_reset_keepalive_timer(sk, tmo);
7312 } else {
7313 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
7314 goto consume;
7315 }
7316 break;
7317 }
7318
7319 case TCP_CLOSING:
7320 if (tp->snd_una == tp->write_seq) {
7321 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
7322 goto consume;
7323 }
7324 break;
7325
7326 case TCP_LAST_ACK:
7327 if (tp->snd_una == tp->write_seq) {
7328 tcp_update_metrics(sk);
7329 tcp_done(sk);
7330 goto consume;
7331 }
7332 break;
7333 }
7334
7335 /* step 6: check the URG bit */
7336 tcp_urg(sk, skb, th);
7337
7338 /* step 7: process the segment text */
7339 switch (sk->sk_state) {
7340 case TCP_CLOSE_WAIT:
7341 case TCP_CLOSING:
7342 case TCP_LAST_ACK:
7343 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
7344 /* If a subflow has been reset, the packet should not
7345 * continue to be processed, drop the packet.
7346 */
7347 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
7348 goto discard;
7349 break;
7350 }
7351 fallthrough;
7352 case TCP_FIN_WAIT1:
7353 case TCP_FIN_WAIT2:
7354 /* RFC 793 says to queue data in these states,
7355 * RFC 1122 says we MUST send a reset.
7356 * BSD 4.4 also does reset.
7357 */
7358 if (sk->sk_shutdown & RCV_SHUTDOWN) {
7359 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
7360 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
7361 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7362 tcp_reset(sk, skb);
7363 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7364 }
7365 }
7366 fallthrough;
7367 case TCP_ESTABLISHED:
7368 tcp_data_queue(sk, skb);
7369 queued = 1;
7370 break;
7371 }
7372
7373 /* tcp_data could move socket to TIME-WAIT */
7374 if (sk->sk_state != TCP_CLOSE) {
7375 tcp_data_snd_check(sk);
7376 tcp_ack_snd_check(sk);
7377 }
7378
7379 if (!queued) {
7380 discard:
7381 tcp_drop_reason(sk, skb, reason);
7382 }
7383 return 0;
7384
7385 consume:
7386 __kfree_skb(skb);
7387 return 0;
7388 }
7389
7390 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
7391 {
7392 struct inet_request_sock *ireq = inet_rsk(req);
7393
7394 if (family == AF_INET)
7395 net_dbg_ratelimited("drop open request from %pI4/%u\n",
7396 &ireq->ir_rmt_addr, port);
7397 #if IS_ENABLED(CONFIG_IPV6)
7398 else if (family == AF_INET6)
7399 net_dbg_ratelimited("drop open request from %pI6/%u\n",
7400 &ireq->ir_v6_rmt_addr, port);
7401 #endif
7402 }
7403
7404 /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
7405 *
7406 * If we receive a SYN packet with these bits set, it means a
7407 * network is playing bad games with TOS bits. In order to
7408 * avoid possible false congestion notifications, we disable
7409 * TCP ECN negotiation.
7410 *
7411 * Exception: tcp_ca wants ECN. This is required for DCTCP
7412 * congestion control: Linux DCTCP asserts ECT on all packets,
7413 * including SYN, which is the most optimal solution; however,
7414 * others, such as FreeBSD, do not.
7415 *
7416 * Exception: At least one of the reserved bits of the TCP header (th->res1) is
7417 * set, indicating the use of a future TCP extension (such as AccECN). See
7418 * RFC8311 §4.3 which updates RFC3168 to allow the development of such
7419 * extensions.
7420 */
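/* Concretely (illustrative): a classic RFC3168 client sets ECE and CWR on its
 * SYN while keeping the IP header Not-ECT; if such a SYN arrives ECT/CE
 * marked instead, negotiation is refused below unless the listener's
 * congestion control asks for ECN or a reserved/AE bit indicates a newer
 * extension such as AccECN.
 */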
7421 static void tcp_ecn_create_request(struct request_sock *req,
7422 const struct sk_buff *skb,
7423 const struct sock *listen_sk,
7424 const struct dst_entry *dst)
7425 {
7426 const struct tcphdr *th = tcp_hdr(skb);
7427 const struct net *net = sock_net(listen_sk);
7428 bool th_ecn = th->ece && th->cwr;
7429 bool ect, ecn_ok;
7430 u32 ecn_ok_dst;
7431
7432 if (tcp_accecn_syn_requested(th) &&
7433 (READ_ONCE(net->ipv4.sysctl_tcp_ecn) >= 3 ||
7434 tcp_ca_needs_accecn(listen_sk))) {
7435 inet_rsk(req)->ecn_ok = 1;
7436 tcp_rsk(req)->accecn_ok = 1;
7437 tcp_rsk(req)->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
7438 INET_ECN_MASK;
7439 return;
7440 }
7441
7442 if (!th_ecn)
7443 return;
7444
7445 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
7446 ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
7447 ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
7448
7449 if (((!ect || th->res1 || th->ae) && ecn_ok) ||
7450 tcp_ca_needs_ecn(listen_sk) ||
7451 (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
7452 tcp_bpf_ca_needs_ecn((struct sock *)req))
7453 inet_rsk(req)->ecn_ok = 1;
7454 }
7455
7456 static void tcp_openreq_init(struct request_sock *req,
7457 const struct tcp_options_received *rx_opt,
7458 struct sk_buff *skb, const struct sock *sk)
7459 {
7460 struct inet_request_sock *ireq = inet_rsk(req);
7461
7462 req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */
7463 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
7464 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
7465 tcp_rsk(req)->snt_synack = 0;
7466 tcp_rsk(req)->snt_tsval_first = 0;
7467 tcp_rsk(req)->last_oow_ack_time = 0;
7468 tcp_rsk(req)->accecn_ok = 0;
7469 tcp_rsk(req)->saw_accecn_opt = TCP_ACCECN_OPT_NOT_SEEN;
7470 tcp_rsk(req)->accecn_fail_mode = 0;
7471 tcp_rsk(req)->syn_ect_rcv = 0;
7472 tcp_rsk(req)->syn_ect_snt = 0;
7473 req->mss = rx_opt->mss_clamp;
7474 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
7475 ireq->tstamp_ok = rx_opt->tstamp_ok;
7476 ireq->sack_ok = rx_opt->sack_ok;
7477 ireq->snd_wscale = rx_opt->snd_wscale;
7478 ireq->wscale_ok = rx_opt->wscale_ok;
7479 ireq->acked = 0;
7480 ireq->ecn_ok = 0;
7481 ireq->ir_rmt_port = tcp_hdr(skb)->source;
7482 ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
7483 ireq->ir_mark = inet_request_mark(sk, skb);
7484 #if IS_ENABLED(CONFIG_SMC)
7485 ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
7486 tcp_sk(sk)->smc_hs_congested(sk));
7487 #endif
7488 }
7489
7490 /*
7491 * Return true if a syncookie should be sent
7492 */
7493 static bool tcp_syn_flood_action(struct sock *sk, const char *proto)
7494 {
7495 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
7496 const char *msg = "Dropping request";
7497 struct net *net = sock_net(sk);
7498 bool want_cookie = false;
7499 u8 syncookies;
7500
7501 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7502
7503 #ifdef CONFIG_SYN_COOKIES
7504 if (syncookies) {
7505 msg = "Sending cookies";
7506 want_cookie = true;
7507 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
7508 } else
7509 #endif
7510 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
7511
7512 if (syncookies != 2 && !READ_ONCE(queue->synflood_warned)) {
7513 WRITE_ONCE(queue->synflood_warned, 1);
7514 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) {
7515 net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n",
7516 proto, inet6_rcv_saddr(sk),
7517 sk->sk_num, msg);
7518 } else {
7519 net_info_ratelimited("%s: Possible SYN flooding on port %pI4:%u. %s.\n",
7520 proto, &sk->sk_rcv_saddr,
7521 sk->sk_num, msg);
7522 }
7523 }
7524
7525 return want_cookie;
7526 }
7527
7528 static void tcp_reqsk_record_syn(const struct sock *sk,
7529 struct request_sock *req,
7530 const struct sk_buff *skb)
7531 {
7532 if (tcp_sk(sk)->save_syn) {
7533 u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
7534 struct saved_syn *saved_syn;
7535 u32 mac_hdrlen;
7536 void *base;
7537
7538 if (tcp_sk(sk)->save_syn == 2) { /* Save full header. */
7539 base = skb_mac_header(skb);
7540 mac_hdrlen = skb_mac_header_len(skb);
7541 len += mac_hdrlen;
7542 } else {
7543 base = skb_network_header(skb);
7544 mac_hdrlen = 0;
7545 }
7546
7547 saved_syn = kmalloc_flex(*saved_syn, data, len, GFP_ATOMIC);
7548 if (saved_syn) {
7549 saved_syn->mac_hdrlen = mac_hdrlen;
7550 saved_syn->network_hdrlen = skb_network_header_len(skb);
7551 saved_syn->tcp_hdrlen = tcp_hdrlen(skb);
7552 memcpy(saved_syn->data, base, len);
7553 req->saved_syn = saved_syn;
7554 }
7555 }
7556 }
7557
7558 /* If a SYN cookie is required and supported, returns a clamped MSS value to be
7559 * used for SYN cookie generation.
7560 */
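/* Usage note (a sketch of the contract, not new behaviour): a zero return
 * means no cookie is needed or possible and the caller continues (or drops)
 * normally; a non-zero return is the MSS, clamped by user_mss or falling
 * back to the address-family default, to encode into the cookie.
 */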
7561 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
7562 const struct tcp_request_sock_ops *af_ops,
7563 struct sock *sk, struct tcphdr *th)
7564 {
7565 struct tcp_sock *tp = tcp_sk(sk);
7566 u16 mss;
7567
7568 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
7569 !inet_csk_reqsk_queue_is_full(sk))
7570 return 0;
7571
7572 if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
7573 return 0;
7574
7575 if (sk_acceptq_is_full(sk)) {
7576 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7577 return 0;
7578 }
7579
7580 mss = tcp_parse_mss_option(th, READ_ONCE(tp->rx_opt.user_mss));
7581 if (!mss)
7582 mss = af_ops->mss_clamp;
7583
7584 return mss;
7585 }
7586
7587 int tcp_conn_request(struct request_sock_ops *rsk_ops,
7588 const struct tcp_request_sock_ops *af_ops,
7589 struct sock *sk, struct sk_buff *skb)
7590 {
7591 struct tcp_fastopen_cookie foc = { .len = -1 };
7592 struct tcp_options_received tmp_opt;
7593 const struct tcp_sock *tp = tcp_sk(sk);
7594 struct net *net = sock_net(sk);
7595 struct sock *fastopen_sk = NULL;
7596 union tcp_seq_and_ts_off st;
7597 struct request_sock *req;
7598 bool want_cookie = false;
7599 struct dst_entry *dst;
7600 struct flowi fl;
7601 u8 syncookies;
7602 u32 isn;
7603
7604 #ifdef CONFIG_TCP_AO
7605 const struct tcp_ao_hdr *aoh;
7606 #endif
7607
7608 isn = __this_cpu_read(tcp_tw_isn);
7609 if (isn) {
7610 /* TW buckets are converted to open requests without
7611 * limitation; they conserve resources and the peer is
7612 * evidently a real one.
7613 */
7614 __this_cpu_write(tcp_tw_isn, 0);
7615 } else {
7616 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7617
7618 if (syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) {
7619 want_cookie = tcp_syn_flood_action(sk,
7620 rsk_ops->slab_name);
7621 if (!want_cookie)
7622 goto drop;
7623 }
7624 }
7625
7626 if (sk_acceptq_is_full(sk)) {
7627 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7628 goto drop;
7629 }
7630
7631 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
7632 if (!req)
7633 goto drop;
7634
7635 req->syncookie = want_cookie;
7636 tcp_rsk(req)->af_specific = af_ops;
7637 tcp_rsk(req)->ts_off = 0;
7638 tcp_rsk(req)->req_usec_ts = false;
7639 #if IS_ENABLED(CONFIG_MPTCP)
7640 tcp_rsk(req)->is_mptcp = 0;
7641 #endif
7642
7643 tcp_clear_options(&tmp_opt);
7644 tmp_opt.mss_clamp = af_ops->mss_clamp;
7645 tmp_opt.user_mss = READ_ONCE(tp->rx_opt.user_mss);
7646 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
7647 want_cookie ? NULL : &foc);
7648
7649 if (want_cookie && !tmp_opt.saw_tstamp)
7650 tcp_clear_options(&tmp_opt);
7651
7652 if (IS_ENABLED(CONFIG_SMC) && want_cookie)
7653 tmp_opt.smc_ok = 0;
7654
7655 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
7656 tcp_openreq_init(req, &tmp_opt, skb, sk);
7657 inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk);
7658
7659 /* Note: tcp_v6_init_req() might override ir_iif for link locals */
7660 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
7661
7662 dst = af_ops->route_req(sk, skb, &fl, req, isn);
7663 if (!dst)
7664 goto drop_and_free;
7665
7666 if (tmp_opt.tstamp_ok || (!want_cookie && !isn))
7667 st = INDIRECT_CALL_INET(af_ops->init_seq_and_ts_off,
7668 tcp_v6_init_seq_and_ts_off,
7669 tcp_v4_init_seq_and_ts_off,
7670 net, skb);
7671
7672 if (tmp_opt.tstamp_ok) {
7673 tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
7674 tcp_rsk(req)->ts_off = st.ts_off;
7675 }
7676 if (!want_cookie && !isn) {
7677 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
7678
7679 /* Kill the following clause, if you dislike this way. */
7680 if (!syncookies &&
7681 (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
7682 (max_syn_backlog >> 2)) &&
7683 !tcp_peer_is_proven(req, dst)) {
7684 /* Without syncookies, the last quarter of the
7685 * backlog is reserved for destinations
7686 * proven to be alive.
7687 * It means that we keep communicating with
7688 * destinations that were already known
7689 * at the moment of the synflood.
7690 */
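/* Worked example (illustrative): with sysctl_max_syn_backlog == 1024 the
 * clause above fires once fewer than 256 slots remain, i.e. once more than
 * 768 requests are queued, and only for peers that tcp_peer_is_proven()
 * does not vouch for.
 */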
7691 pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
7692 rsk_ops->family);
7693 goto drop_and_release;
7694 }
7695
7696 isn = st.seq;
7697 }
7698
7699 tcp_ecn_create_request(req, skb, sk, dst);
7700
7701 if (want_cookie) {
7702 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
7703 if (!tmp_opt.tstamp_ok)
7704 inet_rsk(req)->ecn_ok = 0;
7705 }
7706
7707 #ifdef CONFIG_TCP_AO
7708 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
7709 goto drop_and_release; /* Invalid TCP options */
7710 if (aoh) {
7711 tcp_rsk(req)->used_tcp_ao = true;
7712 tcp_rsk(req)->ao_rcv_next = aoh->keyid;
7713 tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
7714
7715 } else {
7716 tcp_rsk(req)->used_tcp_ao = false;
7717 }
7718 #endif
7719 tcp_rsk(req)->snt_isn = isn;
7720 tcp_rsk(req)->txhash = net_tx_rndhash();
7721 tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
7722 tcp_openreq_init_rwin(req, sk, dst);
7723 sk_rx_queue_set(req_to_sk(req), skb);
7724 if (!want_cookie) {
7725 tcp_reqsk_record_syn(sk, req, skb);
7726 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
7727 }
7728 if (fastopen_sk) {
7729 af_ops->send_synack(fastopen_sk, dst, &fl, req,
7730 &foc, TCP_SYNACK_FASTOPEN, skb);
7731 /* Add the child socket directly into the accept queue */
7732 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
7733 bh_unlock_sock(fastopen_sk);
7734 sock_put(fastopen_sk);
7735 goto drop_and_free;
7736 }
7737 READ_ONCE(sk->sk_data_ready)(sk);
7738 bh_unlock_sock(fastopen_sk);
7739 sock_put(fastopen_sk);
7740 } else {
7741 tcp_rsk(req)->tfo_listener = false;
7742 if (!want_cookie &&
7743 unlikely(!inet_csk_reqsk_queue_hash_add(sk, req))) {
7744 reqsk_free(req);
7745 dst_release(dst);
7746 return 0;
7747 }
7748 af_ops->send_synack(sk, dst, &fl, req, &foc,
7749 !want_cookie ? TCP_SYNACK_NORMAL :
7750 TCP_SYNACK_COOKIE,
7751 skb);
7752 if (want_cookie) {
7753 reqsk_free(req);
7754 return 0;
7755 }
7756 }
7757 reqsk_put(req);
7758 return 0;
7759
7760 drop_and_release:
7761 dst_release(dst);
7762 drop_and_free:
7763 __reqsk_free(req);
7764 drop:
7765 tcp_listendrop(sk);
7766 return 0;
7767 }
7768