// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
#include <net/busy_poll.h>

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after 3WHS has been either completed or aborted (e.g.,
 * RST is received).
 *
 * Before TFO, a child socket is created only after 3WHS is completed,
 * hence it never needs to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until 3WHS is either completed or aborted. Afterwards the req will stay
 * until either the child socket is accepted, or in the rare case when the
 * listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH 3WHS has completed
 * (or aborted) and the child socket has been accepted (or listener closed).
 * When a child socket is accepted, its corresponding req->sk is set to
 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
 * will be used by the code below to determine if a child socket has been
 * accepted or not, and the check is protected by the fastopenq->lock
 * described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect this access, a per-listener
 * spin lock "icsk->icsk_accept_queue.fastopenq->lock" is used. Only in the
 * rare case when both the listener and the child locks are already held,
 * e.g., in inet_csk_listen_stop(), do we not need to acquire the lock.
 * The lock also protects other fields such as fastopenq->qlen, which is
 * decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the listener's existing
 * socket lock. But first, that lock is difficult to use: it is not a simple
 * spin lock - one must consider sock_owned_by_user() and arrange to use the
 * sk_add_backlog() machinery. What really makes it infeasible, though, is
 * the locking hierarchy violation: e.g., inet_csk_listen_stop() may try to
 * acquire a child's lock while holding the listener's socket lock.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener so it is protected by the
 * fastopenq->lock in this function.
 */
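/* For reference: the accept-side counterpart is inet_csk_accept(), which
 * clears req->sk under the same fastopenq->lock once the child has been
 * accepted, and expired RST-triggered reqs queued below are reaped lazily
 * by tcp_fastopen_queue_check().
 */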
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = req->rsk_listener;
	struct fastopen_queue *fastopenq;

	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;

	RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->tfo_listener = false;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		reqsk_put(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered an
	 * RST. This is a simple defense against a TFO spoofing attack: the
	 * req keeps counting against fastopen.max_qlen, and TFO is disabled
	 * once the qlen exceeds max_qlen.
	 *
	 * For more details see the CoNEXT'11 "TCP Fast Open" paper.
	 */
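	/* ->expires is used as a plain timestamp here; tcp_fastopen_queue_check()
	 * compares it against jiffies to decide when an entry on this RST list
	 * may be reaped.
	 */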
	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
}

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

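/* Key material is wiped before the context is freed (kfree_sensitive()), so
 * retired Fast Open keys do not linger in freed memory.
 */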
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL));

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc_obj(*ctx);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}
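	/* key[0] is the primary key and key[1] the optional backup: cookie
	 * generation in tcp_fastopen_cookie_gen() only uses key[0], while
	 * tcp_fastopen_cookie_gen_check() accepts cookies minted with either.
	 */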

	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
	} else {
		octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx,
					  RCU_INITIALIZER(ctx)));
	}

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}

int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

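	/* saddr and daddr are adjacent in both the IPv4 and IPv6 headers, so a
	 * single siphash() call over sizeof(saddr) + sizeof(daddr) bytes covers
	 * the (source, destination) address pair the cookie is bound to.
	 */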
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	tcp_cleanup_skb(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

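	/* The SYN consumed one sequence number; step past it and clear the SYN
	 * flag so only the payload (and a possible FIN) is queued below.
	 */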
	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_add_receive_queue(sk, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
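/* The primary key (index 0) is tried first, so a return of 2 means the
 * client's cookie was minted with the backup key; tcp_try_fastopen() uses
 * that to hand the client a fresh primary-key cookie in the SYN-ACK.
 */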
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req, NULL);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	req->timeout = tcp_timeout_init(child);
	tcp_reset_xmit_timer(child, ICSK_TIME_RETRANS,
			     req->timeout, false);

	refcount_set(&req->rsk_refcnt, 2);
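	/* Two references are held on req from here on: one through the child's
	 * fastopen_rsk pointer and one for its entry on the listener's accept
	 * queue.
	 */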

	sk_mark_napi_id_set(child, skb);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;
	int max_qlen;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	max_qlen = READ_ONCE(fastopenq->max_qlen);
	if (max_qlen == 0)
		return false;

	if (fastopenq->qlen >= max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

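/* Cookie requirements can be waived per net namespace (the
 * TFO_CLIENT_NO_COOKIE / TFO_SERVER_COOKIE_NOT_REQD bits of the
 * net.ipv4.tcp_fastopen sysctl), per socket (the TCP_FASTOPEN_NO_COOKIE
 * socket option sets fastopen_no_cookie), or per route via the
 * RTAX_FASTOPEN_NO_COOKIE metric.
 */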
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g., for
 * a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in the SYN before returning a
			 * SYN-ACK to ack the data. If we fail to create the
			 * socket, fall back and ack the ISN only, but
			 * include the same cookie.
			 *
			 * Note: Data-less SYN with a valid cookie is allowed
			 * to send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				tcp_sk(child)->syn_fastopen_child = 1;
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
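	/* No child was created: echo back either the freshly generated cookie
	 * (len > 0) so the client can attempt Fast Open on a later connection,
	 * or len == -1 so that no cookie option is sent at all.
	 */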
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending the SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: true if we want to defer until the application writes data,
 *               false if we want to send out the SYN immediately.
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_set_bit(DEFER_CONNECT, sk);
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc_obj(*tp->fastopen_req,
					       sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_IPV6_MOD(tcp_fastopen_defer_connect);

/*
 * The following code block deals with middlebox issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after the handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */
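/* A minimal sketch of how an operator can steer this logic, assuming the
 * standard procfs path for the net.ipv4.tcp_fastopen_blackhole_timeout
 * sysctl (in seconds; it is the base of the exponential backoff below):
 *
 *   echo 0    > /proc/sys/net/ipv4/tcp_fastopen_blackhole_timeout  # disable
 *   echo 3600 > /proc/sys/net/ipv4/tcp_fastopen_blackhole_timeout  # 1h base
 */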

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
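	/* E.g., with a 3600s sysctl timeout and three disable events so far,
	 * the pause lasts (1 << 2) * 3600s = 4 hours, capped at 64x the base.
	 */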

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net_device *dev;
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		rcu_read_lock();
		dst = __sk_dst_get(sk);
		dev = dst ? dst_dev_rcu(dst) : NULL;
		if (!(dev && (dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		rcu_read_unlock();
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connections during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}