/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
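
/*
 * Worked example for tcp_in_window() (illustrative only, hypothetical
 * values): with rcv_nxt == 100 and rcv_wnd == 10 the receive window is
 * [100, 110).
 *
 *	tcp_in_window(100, 100, 100, 110) -> true   (bare ACK at the left edge)
 *	tcp_in_window( 95, 105, 100, 110) -> true   (segment overlaps the window)
 *	tcp_in_window(110, 110, 100, 110) -> true   (zero-length at the right edge)
 *	tcp_in_window( 90,  95, 100, 110) -> false  (entirely old data)
 */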

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note that we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
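
/*
 * Usage note (an inference from the generic out-of-window rate limiter,
 * not stated in this file): tcp_oow_rate_limited() is governed by the
 * net.ipv4.tcp_invalid_ratelimit sysctl, so a flood of out-of-window
 * segments aimed at one TIME-WAIT socket cannot extract more than one
 * duplicate ACK per rate-limit interval.
 */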

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow losing one (or more) segments sent by the peer and our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  That means that, strictly speaking, we must
 * spinlock it.  I do not want to!  Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works only at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in reply
	   to our SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
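
/*
 * How callers consume the verdict (a hedged sketch, loosely modelled on
 * the do_time_wait path of tcp_v4_rcv(); labels and error handling are
 * simplified and not authoritative):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	// hand to a listener as a fresh SYN
 *		goto process_as_new_connection;
 *	case TCP_TW_ACK:	// re-ACK the peer's retransmission
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:	// answer with a reset
 *		goto send_reset;
 *	case TCP_TW_SUCCESS:	// skb handled, tw reference already dropped
 *		break;
 *	}
 */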

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
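
/*
 * A minimal sketch of the timer arithmetic above (hypothetical helper,
 * illustration only, not used anywhere in the kernel): the shift
 * expression is 4*rto - rto/2 == 3.5*rto, so even a "dead" FIN-WAIT-2
 * socket lives for at least three and a half retransmission timeouts.
 */
static __maybe_unused int tcp_tw_min_timeout_sketch(int rto)
{
	return (rto << 2) - (rto >> 1);	/* e.g. rto = 200 -> 700 jiffies */
}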

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their values could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
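
/*
 * Note on the "window_clamp ? : dst_metric(...)" line above: this is the
 * GNU C conditional-with-omitted-operand extension, equivalent to
 *
 *	req->rsk_window_clamp = window_clamp ?
 *				window_clamp : dst_metric(dst, RTAX_WINDOW);
 *
 * i.e. the cached per-route window (RTAX_WINDOW) is used only when the
 * listener has no explicit clamp of its own.
 */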

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default congestion control algorithm.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);

		smc_check_reset_syn_req(oldtp, req, newtp);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);
		INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sock_net(sk)->ipv4.sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
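
/*
 * Worked example for the snd_wnd setup above (hypothetical numbers, and
 * assuming the common case where skb is the ACK completing the handshake,
 * so its window field is already subject to scaling): with
 * th->window == 255 and a negotiated snd_wscale of 7, the effective peer
 * window is 255 << 7 == 32640 bytes.  A SYN's window field is never
 * scaled, which is also why window_clamp is capped at the unscaled
 * maximum of 65535 when wscale_ok is false.
 */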

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* The following reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: the only case in which it fails
	   is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}
embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to outside attacks aimed at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
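
/*
 * A hedged sketch of the caller's side (loosely following the
 * TCP_NEW_SYN_RECV handling in tcp_v4_rcv(); labels simplified, not
 * authoritative):
 *
 *	struct sock *nsk = tcp_check_req(sk, skb, req, false);
 *
 *	if (!nsk)		// consumed/dropped: ACKed, rate-limited or reset
 *		goto discard;
 *	if (nsk == sk)		// unacceptable ACK: listener will send the reset
 *		goto process_on_listener;
 *	// otherwise nsk is the fresh child; feed it the segment
 *	ret = tcp_child_process(sk, nsk, skb);
 */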

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
849