xref: /linux/net/ipv4/tcp_fastopen.c (revision 021266ec640c7a4527e6cd4b7349a512b351de1d)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
22100c8d2SYuchung Cheng #include <linux/kernel.h>
310467163SJerry Chu #include <linux/tcp.h>
410467163SJerry Chu #include <linux/rcupdate.h>
510467163SJerry Chu #include <net/tcp.h>
62100c8d2SYuchung Cheng 
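/* Generate the per-netns Fast Open key lazily: if no context has been
 * installed yet, derive one from random bytes. Safe to call repeatedly.
 */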
743713848SHaishuang Yan void tcp_fastopen_init_key_once(struct net *net)
8222e83d2SHannes Frederic Sowa {
943713848SHaishuang Yan 	u8 key[TCP_FASTOPEN_KEY_LENGTH];
1043713848SHaishuang Yan 	struct tcp_fastopen_context *ctxt;
1143713848SHaishuang Yan 
1243713848SHaishuang Yan 	rcu_read_lock();
1343713848SHaishuang Yan 	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
1443713848SHaishuang Yan 	if (ctxt) {
1543713848SHaishuang Yan 		rcu_read_unlock();
1643713848SHaishuang Yan 		return;
1743713848SHaishuang Yan 	}
1843713848SHaishuang Yan 	rcu_read_unlock();
19222e83d2SHannes Frederic Sowa 
20222e83d2SHannes Frederic Sowa 	/* tcp_fastopen_reset_cipher publishes the new context
21222e83d2SHannes Frederic Sowa 	 * atomically, so we allow this race to happen here.
22222e83d2SHannes Frederic Sowa 	 *
23222e83d2SHannes Frederic Sowa 	 * All call sites of tcp_fastopen_cookie_gen also check
24222e83d2SHannes Frederic Sowa 	 * for a valid cookie, so this is an acceptable risk.
25222e83d2SHannes Frederic Sowa 	 */
2643713848SHaishuang Yan 	get_random_bytes(key, sizeof(key));
27438ac880SArd Biesheuvel 	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
28222e83d2SHannes Frederic Sowa }
29222e83d2SHannes Frederic Sowa 
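/* RCU callback: zero and free the key context once the grace period ends. */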
3010467163SJerry Chu static void tcp_fastopen_ctx_free(struct rcu_head *head)
3110467163SJerry Chu {
3210467163SJerry Chu 	struct tcp_fastopen_context *ctx =
3310467163SJerry Chu 	    container_of(head, struct tcp_fastopen_context, rcu);
349092a76dSJason Baron 
35453431a5SWaiman Long 	kfree_sensitive(ctx);
3610467163SJerry Chu }
3710467163SJerry Chu 
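/* Release a socket's private Fast Open key context, if any; the memory is
 * freed after an RCU grace period.
 */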
381fba70e5SYuchung Cheng void tcp_fastopen_destroy_cipher(struct sock *sk)
391fba70e5SYuchung Cheng {
401fba70e5SYuchung Cheng 	struct tcp_fastopen_context *ctx;
411fba70e5SYuchung Cheng 
421fba70e5SYuchung Cheng 	ctx = rcu_dereference_protected(
431fba70e5SYuchung Cheng 			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
441fba70e5SYuchung Cheng 	if (ctx)
451fba70e5SYuchung Cheng 		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
461fba70e5SYuchung Cheng }
471fba70e5SYuchung Cheng 
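/* Detach the per-netns key context and free it after an RCU grace period. */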
4843713848SHaishuang Yan void tcp_fastopen_ctx_destroy(struct net *net)
4943713848SHaishuang Yan {
5043713848SHaishuang Yan 	struct tcp_fastopen_context *ctxt;
5143713848SHaishuang Yan 
52e93abb84SEric Dumazet 	ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);
5343713848SHaishuang Yan 
5443713848SHaishuang Yan 	if (ctxt)
5543713848SHaishuang Yan 		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
5643713848SHaishuang Yan }
5743713848SHaishuang Yan 
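/* Install a new primary (and optional backup) 128-bit key, either on a
 * listener socket (sk != NULL) or for the whole netns. The old context is
 * swapped out with xchg() and freed via RCU.
 */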
581fba70e5SYuchung Cheng int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
59438ac880SArd Biesheuvel 			      void *primary_key, void *backup_key)
6010467163SJerry Chu {
6110467163SJerry Chu 	struct tcp_fastopen_context *ctx, *octx;
621fba70e5SYuchung Cheng 	struct fastopen_queue *q;
639092a76dSJason Baron 	int err = 0;
6410467163SJerry Chu 
65c681edaeSArd Biesheuvel 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
66c681edaeSArd Biesheuvel 	if (!ctx) {
67c681edaeSArd Biesheuvel 		err = -ENOMEM;
689092a76dSJason Baron 		goto out;
6910467163SJerry Chu 	}
70c681edaeSArd Biesheuvel 
71438ac880SArd Biesheuvel 	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
72438ac880SArd Biesheuvel 	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
73c681edaeSArd Biesheuvel 	if (backup_key) {
74438ac880SArd Biesheuvel 		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
75438ac880SArd Biesheuvel 		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
76c681edaeSArd Biesheuvel 		ctx->num = 2;
77c681edaeSArd Biesheuvel 	} else {
78c681edaeSArd Biesheuvel 		ctx->num = 1;
79c681edaeSArd Biesheuvel 	}
80c681edaeSArd Biesheuvel 
811fba70e5SYuchung Cheng 	if (sk) {
821fba70e5SYuchung Cheng 		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
83e93abb84SEric Dumazet 		octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
841fba70e5SYuchung Cheng 	} else {
85e93abb84SEric Dumazet 		octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
861fba70e5SYuchung Cheng 	}
8710467163SJerry Chu 
8810467163SJerry Chu 	if (octx)
8910467163SJerry Chu 		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
909092a76dSJason Baron out:
9110467163SJerry Chu 	return err;
9210467163SJerry Chu }
9310467163SJerry Chu 
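/* Copy the active keys (socket context if @icsk is given, else the netns
 * context) into @key as little-endian u64 pairs; returns the key count.
 */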
94f19008e6SJason Baron int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
95f19008e6SJason Baron 			    u64 *key)
96f19008e6SJason Baron {
97f19008e6SJason Baron 	struct tcp_fastopen_context *ctx;
98f19008e6SJason Baron 	int n_keys = 0, i;
99f19008e6SJason Baron 
100f19008e6SJason Baron 	rcu_read_lock();
101f19008e6SJason Baron 	if (icsk)
102f19008e6SJason Baron 		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
103f19008e6SJason Baron 	else
104f19008e6SJason Baron 		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
105f19008e6SJason Baron 	if (ctx) {
106f19008e6SJason Baron 		n_keys = tcp_fastopen_context_len(ctx);
107f19008e6SJason Baron 		for (i = 0; i < n_keys; i++) {
108f19008e6SJason Baron 			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
109f19008e6SJason Baron 			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
110f19008e6SJason Baron 		}
111f19008e6SJason Baron 	}
112f19008e6SJason Baron 	rcu_read_unlock();
113f19008e6SJason Baron 
114f19008e6SJason Baron 	return n_keys;
115f19008e6SJason Baron }
116f19008e6SJason Baron 
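/* Compute an 8-byte cookie as the SipHash of the SYN's source and
 * destination addresses, keyed by @key. Returns false for unknown families.
 */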
117483642e5SChristoph Paasch static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
118483642e5SChristoph Paasch 					     struct sk_buff *syn,
119438ac880SArd Biesheuvel 					     const siphash_key_t *key,
120149479d0SYuchung Cheng 					     struct tcp_fastopen_cookie *foc)
12110467163SJerry Chu {
122c681edaeSArd Biesheuvel 	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));
123c681edaeSArd Biesheuvel 
124483642e5SChristoph Paasch 	if (req->rsk_ops->family == AF_INET) {
125483642e5SChristoph Paasch 		const struct iphdr *iph = ip_hdr(syn);
12610467163SJerry Chu 
127438ac880SArd Biesheuvel 		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
128c681edaeSArd Biesheuvel 					  sizeof(iph->saddr) +
129c681edaeSArd Biesheuvel 					  sizeof(iph->daddr),
130438ac880SArd Biesheuvel 					  key));
13110467163SJerry Chu 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
132483642e5SChristoph Paasch 		return true;
13310467163SJerry Chu 	}
134483642e5SChristoph Paasch #if IS_ENABLED(CONFIG_IPV6)
135483642e5SChristoph Paasch 	if (req->rsk_ops->family == AF_INET6) {
136483642e5SChristoph Paasch 		const struct ipv6hdr *ip6h = ipv6_hdr(syn);
137483642e5SChristoph Paasch 
138438ac880SArd Biesheuvel 		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
139c681edaeSArd Biesheuvel 					  sizeof(ip6h->saddr) +
140c681edaeSArd Biesheuvel 					  sizeof(ip6h->daddr),
141438ac880SArd Biesheuvel 					  key));
142483642e5SChristoph Paasch 		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
143483642e5SChristoph Paasch 		return true;
144483642e5SChristoph Paasch 	}
145483642e5SChristoph Paasch #endif
146483642e5SChristoph Paasch 	return false;
1473a19ce0eSDaniel Lee }
1483a19ce0eSDaniel Lee 
149c681edaeSArd Biesheuvel /* Generate the fastopen cookie by applying SipHash to both the source and
150c681edaeSArd Biesheuvel  * destination addresses.
1513a19ce0eSDaniel Lee  */
1529092a76dSJason Baron static void tcp_fastopen_cookie_gen(struct sock *sk,
15343713848SHaishuang Yan 				    struct request_sock *req,
1543a19ce0eSDaniel Lee 				    struct sk_buff *syn,
1553a19ce0eSDaniel Lee 				    struct tcp_fastopen_cookie *foc)
1563a19ce0eSDaniel Lee {
157483642e5SChristoph Paasch 	struct tcp_fastopen_context *ctx;
1583a19ce0eSDaniel Lee 
159483642e5SChristoph Paasch 	rcu_read_lock();
1609092a76dSJason Baron 	ctx = tcp_fastopen_get_ctx(sk);
161483642e5SChristoph Paasch 	if (ctx)
162438ac880SArd Biesheuvel 		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
163483642e5SChristoph Paasch 	rcu_read_unlock();
16410467163SJerry Chu }
1655b7ed089SYuchung Cheng 
16661d2bcaeSEric Dumazet /* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
16761d2bcaeSEric Dumazet  * queue this additional data / FIN.
16861d2bcaeSEric Dumazet  */
16961d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
17061d2bcaeSEric Dumazet {
17161d2bcaeSEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
17261d2bcaeSEric Dumazet 
17361d2bcaeSEric Dumazet 	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
17461d2bcaeSEric Dumazet 		return;
17561d2bcaeSEric Dumazet 
17661d2bcaeSEric Dumazet 	skb = skb_clone(skb, GFP_ATOMIC);
17761d2bcaeSEric Dumazet 	if (!skb)
17861d2bcaeSEric Dumazet 		return;
17961d2bcaeSEric Dumazet 
18061d2bcaeSEric Dumazet 	skb_dst_drop(skb);
181a44d6eacSMartin KaFai Lau 	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
182a44d6eacSMartin KaFai Lau 	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
183a44d6eacSMartin KaFai Lau 	 * to avoid double counting.  Also, tcp_segs_in() expects
184a44d6eacSMartin KaFai Lau 	 * skb->len to include the tcp_hdrlen.  Hence, it should
185a44d6eacSMartin KaFai Lau 	 * be called before __skb_pull().
186a44d6eacSMartin KaFai Lau 	 */
187a44d6eacSMartin KaFai Lau 	tp->segs_in = 0;
188a44d6eacSMartin KaFai Lau 	tcp_segs_in(tp, skb);
18961d2bcaeSEric Dumazet 	__skb_pull(skb, tcp_hdrlen(skb));
19076061f63SEric Dumazet 	sk_forced_mem_schedule(sk, skb->truesize);
19161d2bcaeSEric Dumazet 	skb_set_owner_r(skb, sk);
19261d2bcaeSEric Dumazet 
1939d691539SEric Dumazet 	TCP_SKB_CB(skb)->seq++;
1949d691539SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
1959d691539SEric Dumazet 
19661d2bcaeSEric Dumazet 	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
19761d2bcaeSEric Dumazet 	__skb_queue_tail(&sk->sk_receive_queue, skb);
19861d2bcaeSEric Dumazet 	tp->syn_data_acked = 1;
19961d2bcaeSEric Dumazet 
20061d2bcaeSEric Dumazet 	/* u64_stats_update_begin(&tp->syncp) not needed here,
20161d2bcaeSEric Dumazet 	 * as we certainly are not changing upper 32bit value (0)
20261d2bcaeSEric Dumazet 	 */
20361d2bcaeSEric Dumazet 	tp->bytes_received = skb->len;
204e3e17b77SEric Dumazet 
205e3e17b77SEric Dumazet 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
206e3e17b77SEric Dumazet 		tcp_fin(sk);
20761d2bcaeSEric Dumazet }
20861d2bcaeSEric Dumazet 
2099092a76dSJason Baron /* returns 0 - no key match, 1 for primary, 2 for backup */
2109092a76dSJason Baron static int tcp_fastopen_cookie_gen_check(struct sock *sk,
2119092a76dSJason Baron 					 struct request_sock *req,
2129092a76dSJason Baron 					 struct sk_buff *syn,
2139092a76dSJason Baron 					 struct tcp_fastopen_cookie *orig,
2149092a76dSJason Baron 					 struct tcp_fastopen_cookie *valid_foc)
2159092a76dSJason Baron {
2169092a76dSJason Baron 	struct tcp_fastopen_cookie search_foc = { .len = -1 };
2179092a76dSJason Baron 	struct tcp_fastopen_cookie *foc = valid_foc;
2189092a76dSJason Baron 	struct tcp_fastopen_context *ctx;
2199092a76dSJason Baron 	int i, ret = 0;
2209092a76dSJason Baron 
2219092a76dSJason Baron 	rcu_read_lock();
2229092a76dSJason Baron 	ctx = tcp_fastopen_get_ctx(sk);
2239092a76dSJason Baron 	if (!ctx)
2249092a76dSJason Baron 		goto out;
2259092a76dSJason Baron 	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
226438ac880SArd Biesheuvel 		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
2279092a76dSJason Baron 		if (tcp_fastopen_cookie_match(foc, orig)) {
2289092a76dSJason Baron 			ret = i + 1;
2299092a76dSJason Baron 			goto out;
2309092a76dSJason Baron 		}
2319092a76dSJason Baron 		foc = &search_foc;
2329092a76dSJason Baron 	}
2339092a76dSJason Baron out:
2349092a76dSJason Baron 	rcu_read_unlock();
2359092a76dSJason Baron 	return ret;
2369092a76dSJason Baron }
2379092a76dSJason Baron 
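/* Create a child socket straight from the SYN, account it in the listener's
 * fastopen queue, arm the SYNACK retransmit timer, and queue any data
 * carried in the SYN to the child via tcp_fastopen_add_skb().
 */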
2387c85af88SEric Dumazet static struct sock *tcp_fastopen_create_child(struct sock *sk,
2395b7ed089SYuchung Cheng 					      struct sk_buff *skb,
2405b7ed089SYuchung Cheng 					      struct request_sock *req)
2415b7ed089SYuchung Cheng {
24217846376SDave Jones 	struct tcp_sock *tp;
2435b7ed089SYuchung Cheng 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
2445b7ed089SYuchung Cheng 	struct sock *child;
2455e0724d0SEric Dumazet 	bool own_req;
2465b7ed089SYuchung Cheng 
2475e0724d0SEric Dumazet 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
2485e0724d0SEric Dumazet 							 NULL, &own_req);
24951456b29SIan Morris 	if (!child)
2507c85af88SEric Dumazet 		return NULL;
2515b7ed089SYuchung Cheng 
2520536fcc0SEric Dumazet 	spin_lock(&queue->fastopenq.lock);
2530536fcc0SEric Dumazet 	queue->fastopenq.qlen++;
2540536fcc0SEric Dumazet 	spin_unlock(&queue->fastopenq.lock);
2555b7ed089SYuchung Cheng 
2565b7ed089SYuchung Cheng 	/* Initialize the child socket. Have to fix some values to take
2575b7ed089SYuchung Cheng 	 * into account the child is a Fast Open socket and is created
2585b7ed089SYuchung Cheng 	 * only out of the bits carried in the SYN packet.
2595b7ed089SYuchung Cheng 	 */
2605b7ed089SYuchung Cheng 	tp = tcp_sk(child);
2615b7ed089SYuchung Cheng 
262d983ea6fSEric Dumazet 	rcu_assign_pointer(tp->fastopen_rsk, req);
2639439ce00SEric Dumazet 	tcp_rsk(req)->tfo_listener = true;
2645b7ed089SYuchung Cheng 
2655b7ed089SYuchung Cheng 	/* RFC1323: The window in SYN & SYN/ACK segments is never
2665b7ed089SYuchung Cheng 	 * scaled. So correct it appropriately.
2675b7ed089SYuchung Cheng 	 */
2685b7ed089SYuchung Cheng 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
2690dbd7ff3SAlexey Kodanev 	tp->max_window = tp->snd_wnd;
2705b7ed089SYuchung Cheng 
2715b7ed089SYuchung Cheng 	/* Activate the retrans timer so that SYNACK can be retransmitted.
272ca6fb065SEric Dumazet 	 * The request socket is not added to the ehash
2735b7ed089SYuchung Cheng 	 * because it's been added to the accept queue directly.
2745b7ed089SYuchung Cheng 	 */
2755b7ed089SYuchung Cheng 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
2765b7ed089SYuchung Cheng 				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
2775b7ed089SYuchung Cheng 
27841c6d650SReshetova, Elena 	refcount_set(&req->rsk_refcnt, 2);
2795b7ed089SYuchung Cheng 
2805b7ed089SYuchung Cheng 	/* Now finish processing the fastopen child socket. */
28172be0fe6SMartin KaFai Lau 	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
2825b7ed089SYuchung Cheng 
28361d2bcaeSEric Dumazet 	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
284ba34e6d9SEric Dumazet 
28561d2bcaeSEric Dumazet 	tcp_fastopen_add_skb(child, skb);
286d654976cSEric Dumazet 
28761d2bcaeSEric Dumazet 	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
28828b346cbSNeal Cardwell 	tp->rcv_wup = tp->rcv_nxt;
2897656d842SEric Dumazet 	/* tcp_conn_request() is sending the SYNACK,
2907656d842SEric Dumazet 	 * and queues the child into listener accept queue.
2917c85af88SEric Dumazet 	 */
2927c85af88SEric Dumazet 	return child;
2935b7ed089SYuchung Cheng }
2945b7ed089SYuchung Cheng 
2955b7ed089SYuchung Cheng static bool tcp_fastopen_queue_check(struct sock *sk)
2965b7ed089SYuchung Cheng {
2975b7ed089SYuchung Cheng 	struct fastopen_queue *fastopenq;
2985b7ed089SYuchung Cheng 
2995b7ed089SYuchung Cheng 	/* Make sure the listener has enabled fastopen, and we don't
3005b7ed089SYuchung Cheng 	 * exceed the max # of pending TFO requests allowed before trying
3015b7ed089SYuchung Cheng 	 * to validate the cookie in order to avoid burning CPU cycles
3025b7ed089SYuchung Cheng 	 * unnecessarily.
3035b7ed089SYuchung Cheng 	 *
3045b7ed089SYuchung Cheng 	 * XXX (TFO) - The implication of checking the max_qlen before
3055b7ed089SYuchung Cheng 	 * processing a cookie request is that clients can't differentiate
3065b7ed089SYuchung Cheng 	 * between qlen overflow causing Fast Open to be disabled
3075b7ed089SYuchung Cheng 	 * temporarily vs a server not supporting Fast Open at all.
3085b7ed089SYuchung Cheng 	 */
3090536fcc0SEric Dumazet 	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
3100536fcc0SEric Dumazet 	if (fastopenq->max_qlen == 0)
3115b7ed089SYuchung Cheng 		return false;
3125b7ed089SYuchung Cheng 
3135b7ed089SYuchung Cheng 	if (fastopenq->qlen >= fastopenq->max_qlen) {
3145b7ed089SYuchung Cheng 		struct request_sock *req1;
3155b7ed089SYuchung Cheng 		spin_lock(&fastopenq->lock);
3165b7ed089SYuchung Cheng 		req1 = fastopenq->rskq_rst_head;
317fa76ce73SEric Dumazet 		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
31802a1d6e7SEric Dumazet 			__NET_INC_STATS(sock_net(sk),
3195b7ed089SYuchung Cheng 					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
320c10d9310SEric Dumazet 			spin_unlock(&fastopenq->lock);
3215b7ed089SYuchung Cheng 			return false;
3225b7ed089SYuchung Cheng 		}
3235b7ed089SYuchung Cheng 		fastopenq->rskq_rst_head = req1->dl_next;
3245b7ed089SYuchung Cheng 		fastopenq->qlen--;
3255b7ed089SYuchung Cheng 		spin_unlock(&fastopenq->lock);
32613854e5aSEric Dumazet 		reqsk_put(req1);
3275b7ed089SYuchung Cheng 	}
3285b7ed089SYuchung Cheng 	return true;
3295b7ed089SYuchung Cheng }
3305b7ed089SYuchung Cheng 
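/* A valid cookie is not required if the sysctl flag, the per-socket
 * fastopen_no_cookie option, or the route's FASTOPEN_NO_COOKIE metric says so.
 */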
33171c02379SChristoph Paasch static bool tcp_fastopen_no_cookie(const struct sock *sk,
33271c02379SChristoph Paasch 				   const struct dst_entry *dst,
33371c02379SChristoph Paasch 				   int flag)
33471c02379SChristoph Paasch {
3355a542133SKuniyuki Iwashima 	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
33671c02379SChristoph Paasch 	       tcp_sk(sk)->fastopen_no_cookie ||
33771c02379SChristoph Paasch 	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
33871c02379SChristoph Paasch }
33971c02379SChristoph Paasch 
34089278c9dSYuchung Cheng /* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
34189278c9dSYuchung Cheng  * may be updated and returned to the client in the SYN-ACK later, e.g. for
34289278c9dSYuchung Cheng  * a Fast Open cookie request (foc->len == 0).
34389278c9dSYuchung Cheng  */
3447c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
3455b7ed089SYuchung Cheng 			      struct request_sock *req,
34671c02379SChristoph Paasch 			      struct tcp_fastopen_cookie *foc,
34771c02379SChristoph Paasch 			      const struct dst_entry *dst)
3485b7ed089SYuchung Cheng {
34989278c9dSYuchung Cheng 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
3505a542133SKuniyuki Iwashima 	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
351e1cfcbe8SHaishuang Yan 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
3527c85af88SEric Dumazet 	struct sock *child;
3539092a76dSJason Baron 	int ret = 0;
3545b7ed089SYuchung Cheng 
355531c94a9SYuchung Cheng 	if (foc->len == 0) /* Client requests a cookie */
356c10d9310SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
357531c94a9SYuchung Cheng 
358e1cfcbe8SHaishuang Yan 	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
35989278c9dSYuchung Cheng 	      (syn_data || foc->len >= 0) &&
36089278c9dSYuchung Cheng 	      tcp_fastopen_queue_check(sk))) {
36189278c9dSYuchung Cheng 		foc->len = -1;
3627c85af88SEric Dumazet 		return NULL;
3635b7ed089SYuchung Cheng 	}
36489278c9dSYuchung Cheng 
365e3faa49bSLuke Hsiao 	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
36689278c9dSYuchung Cheng 		goto fastopen;
36789278c9dSYuchung Cheng 
3689092a76dSJason Baron 	if (foc->len == 0) {
3699092a76dSJason Baron 		/* Client requests a cookie. */
3709092a76dSJason Baron 		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
3719092a76dSJason Baron 	} else if (foc->len > 0) {
3729092a76dSJason Baron 		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
3739092a76dSJason Baron 						    &valid_foc);
3749092a76dSJason Baron 		if (!ret) {
3759092a76dSJason Baron 			NET_INC_STATS(sock_net(sk),
3769092a76dSJason Baron 				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
3779092a76dSJason Baron 		} else {
3789092a76dSJason Baron 			/* Cookie is valid. Create a (full) child socket to
3799092a76dSJason Baron 			 * accept the data in SYN before returning a SYN-ACK to
3809092a76dSJason Baron 			 * ack the data. If we fail to create the socket, fall
3819092a76dSJason Baron 			 * back and ack the ISN only but include the same
3829092a76dSJason Baron 			 * cookie.
383843f4a55SYuchung Cheng 			 *
3849092a76dSJason Baron 			 * Note: Data-less SYN with valid cookie is allowed to
3859092a76dSJason Baron 			 * send data in SYN_RECV state.
386843f4a55SYuchung Cheng 			 */
38789278c9dSYuchung Cheng fastopen:
38811199369STonghao Zhang 			child = tcp_fastopen_create_child(sk, skb, req);
3897c85af88SEric Dumazet 			if (child) {
3909092a76dSJason Baron 				if (ret == 2) {
3919092a76dSJason Baron 					valid_foc.exp = foc->exp;
3929092a76dSJason Baron 					*foc = valid_foc;
3939092a76dSJason Baron 					NET_INC_STATS(sock_net(sk),
3949092a76dSJason Baron 						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
3959092a76dSJason Baron 				} else {
39689278c9dSYuchung Cheng 					foc->len = -1;
3979092a76dSJason Baron 				}
398c10d9310SEric Dumazet 				NET_INC_STATS(sock_net(sk),
399843f4a55SYuchung Cheng 					      LINUX_MIB_TCPFASTOPENPASSIVE);
4007c85af88SEric Dumazet 				return child;
4015b7ed089SYuchung Cheng 			}
4029092a76dSJason Baron 			NET_INC_STATS(sock_net(sk),
4039092a76dSJason Baron 				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
4049092a76dSJason Baron 		}
4059092a76dSJason Baron 	}
4067f9b838bSDaniel Lee 	valid_foc.exp = foc->exp;
40789278c9dSYuchung Cheng 	*foc = valid_foc;
4087c85af88SEric Dumazet 	return NULL;
4095b7ed089SYuchung Cheng }
410065263f4SWei Wang 
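/* Client side: fetch the cached cookie/MSS for this destination and decide
 * whether a Fast Open attempt makes sense (no blackhole suspected, and
 * either a cookie is cached or none is required).
 */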
411065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
412065263f4SWei Wang 			       struct tcp_fastopen_cookie *cookie)
413065263f4SWei Wang {
41471c02379SChristoph Paasch 	const struct dst_entry *dst;
415065263f4SWei Wang 
4167268586bSYuchung Cheng 	tcp_fastopen_cache_get(sk, mss, cookie);
417cf1ef3f0SWei Wang 
418cf1ef3f0SWei Wang 	/* Firewall blackhole issue check */
419cf1ef3f0SWei Wang 	if (tcp_fastopen_active_should_disable(sk)) {
420cf1ef3f0SWei Wang 		cookie->len = -1;
421cf1ef3f0SWei Wang 		return false;
422cf1ef3f0SWei Wang 	}
423cf1ef3f0SWei Wang 
42471c02379SChristoph Paasch 	dst = __sk_dst_get(sk);
42571c02379SChristoph Paasch 
42671c02379SChristoph Paasch 	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
427065263f4SWei Wang 		cookie->len = -1;
428065263f4SWei Wang 		return true;
429065263f4SWei Wang 	}
43048027478SJason Baron 	if (cookie->len > 0)
43148027478SJason Baron 		return true;
43248027478SJason Baron 	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
43348027478SJason Baron 	return false;
434065263f4SWei Wang }
43519f6d3f3SWei Wang 
43619f6d3f3SWei Wang /* This function checks if we want to defer sending SYN until the first
43719f6d3f3SWei Wang  * write().  We defer under the following conditions:
43819f6d3f3SWei Wang  * 1. fastopen_connect sockopt is set
43919f6d3f3SWei Wang  * 2. we have a valid cookie
44019f6d3f3SWei Wang  * Return value: return true if we want to defer until application writes data
44119f6d3f3SWei Wang  *               return false if we want to send out SYN immediately
44219f6d3f3SWei Wang  */
44319f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
44419f6d3f3SWei Wang {
44519f6d3f3SWei Wang 	struct tcp_fastopen_cookie cookie = { .len = 0 };
44619f6d3f3SWei Wang 	struct tcp_sock *tp = tcp_sk(sk);
44719f6d3f3SWei Wang 	u16 mss;
44819f6d3f3SWei Wang 
44919f6d3f3SWei Wang 	if (tp->fastopen_connect && !tp->fastopen_req) {
45019f6d3f3SWei Wang 		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
45119f6d3f3SWei Wang 			inet_sk(sk)->defer_connect = 1;
45219f6d3f3SWei Wang 			return true;
45319f6d3f3SWei Wang 		}
45419f6d3f3SWei Wang 
45519f6d3f3SWei Wang 		/* Alloc fastopen_req in order for FO option to be included
45619f6d3f3SWei Wang 		 * in SYN
45719f6d3f3SWei Wang 		 */
45819f6d3f3SWei Wang 		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
45919f6d3f3SWei Wang 					   sk->sk_allocation);
46019f6d3f3SWei Wang 		if (tp->fastopen_req)
46119f6d3f3SWei Wang 			tp->fastopen_req->cookie = cookie;
46219f6d3f3SWei Wang 		else
46319f6d3f3SWei Wang 			*err = -ENOBUFS;
46419f6d3f3SWei Wang 	}
46519f6d3f3SWei Wang 	return false;
46619f6d3f3SWei Wang }
46719f6d3f3SWei Wang EXPORT_SYMBOL(tcp_fastopen_defer_connect);
468cf1ef3f0SWei Wang 
469cf1ef3f0SWei Wang /*
470cf1ef3f0SWei Wang  * The following code block is to deal with middle box issues with TFO:
471cf1ef3f0SWei Wang  * Middlebox firewall issues can potentially cause the server's data to be
472cf1ef3f0SWei Wang  * blackholed after a successful 3WHS using TFO.
473cf1ef3f0SWei Wang  * The proposed solution is to disable active TFO globally under the
474cf1ef3f0SWei Wang  * following circumstances:
475cf1ef3f0SWei Wang  *   1. client side TFO socket receives out of order FIN
476cf1ef3f0SWei Wang  *   2. client side TFO socket receives out of order RST
4777268586bSYuchung Cheng  *   3. client side TFO socket has timed out three times consecutively during
4787268586bSYuchung Cheng  *      or after handshake
479cf1ef3f0SWei Wang  * We disable active side TFO globally for 1hr at first. Then if it
480cf1ef3f0SWei Wang  * happens again, we disable it for 2h, then 4h, 8h, ...
481cf1ef3f0SWei Wang  * And we reset the timeout back to 1hr when we see a successful active
482cf1ef3f0SWei Wang  * TFO connection with data exchanges.
483cf1ef3f0SWei Wang  */
484cf1ef3f0SWei Wang 
485cf1ef3f0SWei Wang /* Disable active TFO, record the current jiffies in tfo_active_disable_stamp
486cf1ef3f0SWei Wang  * and bump tfo_active_disable_times.
487cf1ef3f0SWei Wang  */
48846c2fa39SWei Wang void tcp_fastopen_active_disable(struct sock *sk)
489cf1ef3f0SWei Wang {
4903733be14SHaishuang Yan 	struct net *net = sock_net(sk);
491cf1ef3f0SWei Wang 
492*021266ecSKuniyuki Iwashima 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
493213ad73dSWei Wang 		return;
494213ad73dSWei Wang 
4956f20c8adSEric Dumazet 	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
4966f20c8adSEric Dumazet 	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);
4976f20c8adSEric Dumazet 
4986f20c8adSEric Dumazet 	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
4996f20c8adSEric Dumazet 	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
5006f20c8adSEric Dumazet 	 */
5016f20c8adSEric Dumazet 	smp_mb__before_atomic();
5023733be14SHaishuang Yan 	atomic_inc(&net->ipv4.tfo_active_disable_times);
5036f20c8adSEric Dumazet 
5043733be14SHaishuang Yan 	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
505cf1ef3f0SWei Wang }
506cf1ef3f0SWei Wang 
507cf1ef3f0SWei Wang /* Calculate timeout for tfo active disable
508cf1ef3f0SWei Wang  * Return true if we are still in the active TFO disable period
509cf1ef3f0SWei Wang  * Return false if timeout already expired and we should use active TFO
510cf1ef3f0SWei Wang  */
511cf1ef3f0SWei Wang bool tcp_fastopen_active_should_disable(struct sock *sk)
512cf1ef3f0SWei Wang {
513*021266ecSKuniyuki Iwashima 	unsigned int tfo_bh_timeout =
514*021266ecSKuniyuki Iwashima 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
515cf1ef3f0SWei Wang 	unsigned long timeout;
516213ad73dSWei Wang 	int tfo_da_times;
5173733be14SHaishuang Yan 	int multiplier;
518cf1ef3f0SWei Wang 
519213ad73dSWei Wang 	if (!tfo_bh_timeout)
520213ad73dSWei Wang 		return false;
521213ad73dSWei Wang 
522213ad73dSWei Wang 	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
523cf1ef3f0SWei Wang 	if (!tfo_da_times)
524cf1ef3f0SWei Wang 		return false;
525cf1ef3f0SWei Wang 
5266f20c8adSEric Dumazet 	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
5276f20c8adSEric Dumazet 	smp_rmb();
5286f20c8adSEric Dumazet 
529974d8f86SZheng Yongjun 	/* Limit timeout to max: 2^6 * initial timeout */
530cf1ef3f0SWei Wang 	multiplier = 1 << min(tfo_da_times - 1, 6);
5316f20c8adSEric Dumazet 
5326f20c8adSEric Dumazet 	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
5336f20c8adSEric Dumazet 	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
5346f20c8adSEric Dumazet 		  multiplier * tfo_bh_timeout * HZ;
5356f20c8adSEric Dumazet 	if (time_before(jiffies, timeout))
536cf1ef3f0SWei Wang 		return true;
537cf1ef3f0SWei Wang 
538cf1ef3f0SWei Wang 	/* Set the check bit so that a later successful active TFO
539cf1ef3f0SWei Wang 	 * connection can reset tfo_active_disable_times.
540cf1ef3f0SWei Wang 	 */
541cf1ef3f0SWei Wang 	tcp_sk(sk)->syn_fastopen_ch = 1;
542cf1ef3f0SWei Wang 	return false;
543cf1ef3f0SWei Wang }
544cf1ef3f0SWei Wang 
545cf1ef3f0SWei Wang /* Disable active TFO if FIN is the only packet in the ofo queue
546cf1ef3f0SWei Wang  * and no data is received.
547cf1ef3f0SWei Wang  * Also check if we can reset tfo_active_disable_times if data is
548cf1ef3f0SWei Wang  * received successfully on a marked active TFO socket opened on
549cf1ef3f0SWei Wang  * a non-loopback interface.
550cf1ef3f0SWei Wang  */
551cf1ef3f0SWei Wang void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
552cf1ef3f0SWei Wang {
553cf1ef3f0SWei Wang 	struct tcp_sock *tp = tcp_sk(sk);
554cf1ef3f0SWei Wang 	struct dst_entry *dst;
55518a4c0eaSEric Dumazet 	struct sk_buff *skb;
556cf1ef3f0SWei Wang 
557cf1ef3f0SWei Wang 	if (!tp->syn_fastopen)
558cf1ef3f0SWei Wang 		return;
559cf1ef3f0SWei Wang 
560cf1ef3f0SWei Wang 	if (!tp->data_segs_in) {
56118a4c0eaSEric Dumazet 		skb = skb_rb_first(&tp->out_of_order_queue);
56218a4c0eaSEric Dumazet 		if (skb && !skb_rb_next(skb)) {
563cf1ef3f0SWei Wang 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
56446c2fa39SWei Wang 				tcp_fastopen_active_disable(sk);
565cf1ef3f0SWei Wang 				return;
566cf1ef3f0SWei Wang 			}
567cf1ef3f0SWei Wang 		}
568cf1ef3f0SWei Wang 	} else if (tp->syn_fastopen_ch &&
5693733be14SHaishuang Yan 		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
570cf1ef3f0SWei Wang 		dst = sk_dst_get(sk);
571cf1ef3f0SWei Wang 		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
5723733be14SHaishuang Yan 			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
573cf1ef3f0SWei Wang 		dst_release(dst);
574cf1ef3f0SWei Wang 	}
575cf1ef3f0SWei Wang }
5767268586bSYuchung Cheng 
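/* Pause active TFO globally when a Fast Open connection has hit repeated
 * retransmit timeouts during or right after the handshake.
 */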
5777268586bSYuchung Cheng void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
5787268586bSYuchung Cheng {
5797268586bSYuchung Cheng 	u32 timeouts = inet_csk(sk)->icsk_retransmits;
5807268586bSYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
5817268586bSYuchung Cheng 
5827268586bSYuchung Cheng 	/* Broken middle-boxes may black-hole Fast Open connections during or
5837268586bSYuchung Cheng 	 * even after the handshake. Be extremely conservative and pause
5847268586bSYuchung Cheng 	 * Fast Open globally after hitting the third consecutive timeout or
5857268586bSYuchung Cheng 	 * exceeding the configured timeout limit.
5867268586bSYuchung Cheng 	 */
5877268586bSYuchung Cheng 	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
5887268586bSYuchung Cheng 	    (timeouts == 2 || (timeouts < 2 && expired))) {
5897268586bSYuchung Cheng 		tcp_fastopen_active_disable(sk);
5907268586bSYuchung Cheng 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
5917268586bSYuchung Cheng 	}
5927268586bSYuchung Cheng }
593