xref: /linux/net/ipv4/tcp_fastopen.c (revision 13091aa30535b719e269f20a7bc34002bf5afae5)
// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/siphash.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	kzfree(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

	ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
				lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key,
			      unsigned int len)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(ctx->key[0], primary_key, len);
	if (backup_key) {
		memcpy(ctx->key[1], backup_key, len);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = rcu_dereference_protected(q->ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(q->ctx, ctx);
	} else {
		octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
		rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
	}
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}
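
/* A usage sketch (illustrative; not a call site in this file): key
 * rotation installs the new key as primary while demoting the old
 * primary to backup, so cookies minted under the old key keep
 * validating until clients refresh:
 *
 *	u8 new_key[TCP_FASTOPEN_KEY_LENGTH];
 *	u8 old_key[TCP_FASTOPEN_KEY_LENGTH];	// previous primary, assumed saved
 *
 *	get_random_bytes(new_key, sizeof(new_key));
 *	tcp_fastopen_reset_cipher(net, NULL, new_key, old_key,
 *				  sizeof(new_key));
 */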

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const u8 *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_KEY_LENGTH != sizeof(siphash_key_t));
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = siphash(&iph->saddr,
				      sizeof(iph->saddr) +
				      sizeof(iph->daddr),
				      (const siphash_key_t *)key);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = siphash(&ip6h->saddr,
				      sizeof(ip6h->saddr) +
				      sizeof(ip6h->daddr),
				      (const siphash_key_t *)key);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}
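
/* Note on the hashing above: saddr and daddr are adjacent in both the
 * IPv4 and the IPv6 header, so a single siphash() over
 * sizeof(saddr) + sizeof(daddr) bytes starting at &saddr covers the
 * (source, destination) address pair in one call; conceptually:
 *
 *	cookie = siphash(saddr || daddr, key);	// 64-bit output
 */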

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
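
/* Worked example with made-up numbers: a SYN with seq 1000 carrying 100
 * bytes of data arrives.  tcp_fastopen_create_child() set rcv_nxt to
 * 1001 (ISN + 1), so end_seq (1101) != rcv_nxt and the data is queued:
 * seq is bumped to 1001 to skip the SYN's slot in sequence space, the
 * TCP header is pulled, and rcv_nxt advances to 1101 so the SYN-ACK
 * acks the payload as well.
 */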

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}
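
/* A return of 2 means the client's cookie was minted under the backup
 * key: tcp_try_fastopen() still accepts it, but echoes a fresh
 * primary-key cookie (left in valid_foc by the first loop iteration)
 * in the SYN-ACK so the client converges back on the primary key.
 */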

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account that the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into the listener's accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
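
/* max_qlen comes from the TCP_FASTOPEN socket option on the listener.
 * A minimal server-side enablement sketch (userspace, illustrative):
 *
 *	int qlen = 16;	// cap on pending (not yet accepted) TFO requests
 *
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, SOMAXCONN);
 */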

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later, e.g. for
 * a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (syn_data &&
	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* The cookie is valid. Create a (full) child socket
			 * to accept the data in the SYN before returning a
			 * SYN-ACK to ack the data. If we fail to create the
			 * socket, fall back and ack the ISN only, but include
			 * the same cookie.
			 *
			 * Note: a data-less SYN with a valid cookie is allowed
			 * to send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
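
/* Outcome summary for tcp_try_fastopen() once the enable/queue checks
 * pass (a sketch, not exhaustive):
 *   - SYN data present and no cookie required: child created right away.
 *   - foc->len == 0 (cookie request): no child; a freshly generated
 *     cookie is echoed in the SYN-ACK.
 *   - valid cookie (ret 1 or 2): child created; SYN data, if any, is
 *     queued and acked by the SYN-ACK.
 *   - invalid cookie or child creation failure: no child; the server's
 *     current cookie (when one could be generated) is echoed so the
 *     client can retry with it.
 */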

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	return cookie->len > 0;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);
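
/* Client-side sketch (userspace, illustrative) of the deferred-connect
 * flow this helper implements; TCP_FASTOPEN_CONNECT makes connect()
 * return immediately, and the SYN (plus data, when a cookie is cached)
 * goes out on the first write:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
 *	write(fd, buf, len);	// SYN [+ data] is sent here
 *
 * The older alternative is sendto(fd, buf, len, MSG_FASTOPEN, ...).
 */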

/*
 * The following code block deals with middlebox issues with TFO:
 * middlebox firewalls can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after the handshake
 * We disable active side TFO globally for 1h at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1h when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	atomic_inc(&net->ipv4.tfo_active_disable_times);
	net->ipv4.tfo_active_disable_stamp = jiffies;
	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate the timeout for the TFO active disable period.
 * Return true if we are still in the active TFO disable period.
 * Return false if the timeout has expired and we should use active TFO.
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	unsigned long timeout;
	int multiplier;

	if (!tfo_da_times)
		return false;

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);
	timeout = multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}
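
/* Worked example, assuming the default blackhole timeout of 3600s at
 * this revision: after the third disable event tfo_da_times == 3, so
 * multiplier = 1 << min(2, 6) = 4 and active TFO stays paused for
 * 4 * 3600s = 4 hours; the multiplier is capped at 1 << 6 = 64 no
 * matter how many further events occur.
 */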

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connections during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}
569