xref: /linux/net/ipv4/tcp_minisocks.c (revision 0e685c3e7158d35626d6d76b9f859eae806d87fa)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

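/* Editor's note: tcp_in_window() below implements the RFC 793 "SEGMENT
 * ARRIVES" acceptability test for a receive window [s_win, e_win).
 * A sketch of the three accepted cases, with hypothetical numbers
 * (s_win = 1000, e_win = 2000):
 *
 *   seq == s_win                       - e.g. (seq=1000, end_seq=1000):
 *                                        a segment starting exactly at
 *                                        rcv_nxt is accepted even when
 *                                        the window is zero.
 *   after(end_seq, s_win) &&
 *   before(seq, e_win)                 - e.g. (seq=900, end_seq=1100):
 *                                        at least one byte overlaps the
 *                                        window.
 *   seq == e_win && seq == end_seq     - a zero-length segment sitting
 *                                        exactly at the right window edge.
 */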
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note: we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
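/* Editor's note: a sketch of the tcp_tw_status contract as it can be read
 * off this file (callers such as tcp_v4_rcv() are assumed):
 *
 *   TCP_TW_ACK     - the caller should answer with an ACK; the tw
 *                    reference is deliberately NOT put here, the caller
 *                    releases it.
 *   TCP_TW_RST     - the caller should answer with a reset.
 *   TCP_TW_SYN     - the segment may be treated as a SYN for a new
 *                    incarnation of the connection.
 *   TCP_TW_SUCCESS - nothing to transmit; wherever this file returns it,
 *                    the tw reference has already been dropped.
 */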

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, possibly, a tail of data) because one
 *   or more of our ACKs were lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering off their
 *   path.  That is not quite correct.  This timeout is calculated so that
 *   it exceeds the maximal retransmission timeout by enough to tolerate
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (e.g. based on
 *   PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  Strictly speaking, that means we must spinlock
 * it, which I do not want to do!  Well, the probability of misbehaviour
 * is ridiculously low, and it seems we could use some mb() tricks to
 * avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

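	/* Editor's note: a tw bucket whose tw_substate is TCP_FIN_WAIT2
	 * stands in for a full socket whose owner has already gone away
	 * (the "dead fin-wait-2" mentioned above tcp_time_wait()); only
	 * on receipt of the peer's FIN does it become a true TIME-WAIT
	 * bucket below.
	 */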
	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover
	   we may even relax the silly seq-space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to the time-wait state.
	   That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
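		/* Editor's note: choose an ISN for the new incarnation that
		 * is provably above anything the old one could have used:
		 * tw_snd_nxt plus the maximal unscaled window (65535) plus 2
		 * (presumably covering the SYN and FIN sequence numbers).
		 * Zero is skipped, suggesting that a zero tcp_tw_isn means
		 * "unset" to the consumer of this field.
		 */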
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so that the timewait-ack-generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!tcp_alloc_md5sig_pool())
			goto out_free;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
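		/* Editor's note: the expression below is (RTO << 2) - (RTO >> 1),
		 * i.e. 4*RTO - RTO/2 = 3.5*RTO, used further down as a lower
		 * bound on the timeout so that it outlives the peer's
		 * retransmissions (see the TIME-WAIT comment block above).
		 */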
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);

		/* Make sure the TIME_WAIT timeout is long enough to fire
		 * after the peer's retransmissions.
		 */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are disabled
		 * in the following section, otherwise the timer handler could
		 * run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key) {
			kfree_rcu(twsk->tw_md5_key, rcu);
			static_branch_slow_dec_deferred(&tcp_md5_needed);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their values could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* Limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* Use tcp_full_space(): this is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
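/* Editor's note: a worked example for the function above, with
 * hypothetical numbers.  Suppose full_space = 65535, mss = 1460,
 * timestamps enabled and no user-locked receive buffer.  If the BPF hook
 * returns rcv_wnd = 0, the initial window comes from
 * dst_metric(dst, RTAX_INITRWND); otherwise full_space is raised to at
 * least rcv_wnd * mss before tcp_select_initial_window() picks
 * rsk_rcv_wnd, rsk_window_clamp and rcv_wscale from an effective mss of
 * 1460 - TCPOLEN_TSTAMP_ALIGNED (12) = 1448 bytes.
 */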

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default ca.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now set up the tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket, but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but that is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
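	/* Editor's note, hypothetical numbers: if this request has already
	 * been retransmitted so that reqsk_timeout() returns ~3*HZ, the
	 * peer's ts_recent is assumed to be roughly 3 seconds old.  That
	 * coarse estimate is all tcp_paws_reject() needs here.
	 */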

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC 793 draws this case (incorrectly! it was fixed in
		 * RFC 1122) in figures 6 and 8, but the formal protocol
		 * description says NOTHING. To be more exact, it says
		 * that we should send an ACK, because this segment (at
		 * least, if it has no data) is out of window.
		 *
		 *  CONCLUSION: RFC 793 (even as amended by RFC 1122) DOES
		 *  NOT describe the SYN-RECV state. All of that description
		 *  is wrong; we cannot trust it and should rely only on
		 *  common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figures 8 and 6 of
		 * RFC 793, as fixed by RFC 1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* What follows reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC 793.
	   It is broken, however: the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC 793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test; the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The probability is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not; hence, we do not either.

	   Note that this case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before we attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req, because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate the SYN; it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK bit
	 * is set.  If it is not set, silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACKs. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
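	/* Editor's note: with e.g. setsockopt(TCP_DEFER_ACCEPT) arming
	 * rskq_defer_accept, the bare third-handshake ACK (end_seq ==
	 * rcv_isn + 1, no payload) is dropped above and only counted in
	 * TCPDEFERACCEPTDROP; the child socket is created once the client
	 * actually sends data or the request has timed out often enough.
	 */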

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * THE ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to outside attacks aimed at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* Record the sk_napi_id and sk_rx_queue_mapping of the child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wake up the parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table, and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
895