xref: /linux/net/ipv4/tcp_minisocks.c (revision cd80e7ee47d2fd5c97563c003ff31ce8240ca2d8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
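
/*
 * Editor's aside (not part of the kernel build): a minimal standalone
 * sketch of the check above, using a hypothetical seq_before() that
 * mirrors the kernel's before()/after() macros, to show why the
 * comparisons stay correct across 32-bit sequence-number wraparound.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* modular comparison, as in before() */
}

static bool demo_in_window(uint32_t seq, uint32_t end_seq,
			   uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)
		return true;
	/* after(end_seq, s_win) is equivalent to seq_before(s_win, end_seq) */
	if (seq_before(s_win, end_seq) && seq_before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

/*
 * A window straddling the 2^32 wrap still accepts a segment crossing it,
 * e.g. demo_in_window(0xfffffff8, 0x00000008, 0xfffffff0, 0x00000100)
 * returns true.
 */
#endif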

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send an ACK. Note that we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}
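
/*
 * Editor's aside (a sketch under assumptions, not kernel code): TCP-AO
 * extends the 32-bit sequence space with a Sequence Number Extension
 * (SNE) that acts as the upper 32 bits. The helper above bumps rcv_sne
 * exactly when the advancing rcv_nxt wraps past 2^32, conceptually:
 */
#if 0
#include <stdint.h>

struct demo_ao {
	uint32_t rcv_sne;	/* upper 32 bits of the extended space */
	uint32_t rcv_nxt;	/* lower 32 bits */
};

static void demo_rcv_nxt_update(struct demo_ao *ao, uint32_t seq)
{
	if (seq < ao->rcv_nxt)	/* forward move that wrapped past 2^32 */
		ao->rcv_sne++;
	ao->rcv_nxt = seq;
}
#endif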

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting its FIN (and, possibly, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states, etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
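
	/*
	 * Editor's note: tcp_paws_reject() ultimately compares the
	 * timestamps as signed 32-bit deltas. A hypothetical worked
	 * example: with ts_recent = 1000050 and rcv_tsval = 1000000,
	 * the delta (s32)(1000050 - 1000000) = 50 says the segment
	 * carries an older timestamp than the one last seen, so PAWS
	 * may reject it as a stale duplicate.
	 */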

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send an ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] in TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover we may
	   even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to the time-wait state.
	   That is not good, but not fatal yet.
	 */
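
	/*
	 * Editor's note on the ISN picked below: tw_snd_nxt + 65535 + 2
	 * appears to place the new ISN above anything the old incarnation
	 * could have sent (one maximal unscaled window plus the sequence
	 * slots consumed by SYN and FIN), satisfying requirement (1) of
	 * the RFC 1122 rule quoted above. Zero is skipped because, as this
	 * editor understands the callers, *tw_isn == 0 means "no ISN
	 * override".
	 */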
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
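
/*
 * Editor's sketch of how a caller consumes the verdict above (loosely
 * modelled on the IPv4 receive path; the exact helpers vary by address
 * family and kernel version, so treat the details as assumptions):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
 *	case TCP_TW_SYN:
 *		// try to find a listener and reopen the connection
 *	case TCP_TW_ACK:
 *		// answer with a (possibly rate-limited) ACK
 *	case TCP_TW_RST:
 *		// send a reset and kill the timewait socket
 *	case TCP_TW_SUCCESS:
 *		// nothing more to do; the skb is dropped
 *	}
 */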

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so that the timewait ack-generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
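
		/*
		 * Editor's note: (rto << 2) - (rto >> 1) is 4*RTO - RTO/2,
		 * i.e. 3.5 * RTO; e.g. an RTO of 200ms yields a 700ms
		 * floor for the timeout chosen below.
		 */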

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section; otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning: This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their values could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* Limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* Use tcp_full_space() because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
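
/*
 * Editor's worked example (hypothetical numbers, roughly following the
 * window-selection algorithm): with full_space = 131072 bytes, a shift
 * of rcv_wscale = 2 is needed, since 131072 >> 1 = 65536 still exceeds
 * the 16-bit window field while 131072 >> 2 = 32768 fits; the chosen
 * shift is then stored in ireq->rcv_wscale and advertised to the peer.
 */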

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default ca.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *ao_key;
#endif

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now set up the tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;
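
	/*
	 * Editor's note: when window scaling was negotiated, the
	 * handshake-completing ACK parsed here already carries a scaled
	 * window, so e.g. a raw header value of 512 with snd_wscale = 7
	 * yields an effective snd_wnd of 512 << 7 = 65536 bytes (with
	 * wscale_ok false, snd_wscale is 0 and the shift is a no-op).
	 */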

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;
	ao_key = treq->af_specific->ao_lookup(sk, req,
				tcp_rsk(req)->ao_keyid, -1);
	if (ao_key)
		newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ACK validation, and another inside tcp_v4_reqsk_send_ack(). Can we
 * do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) in figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong; we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet,
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails to work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   A malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK; the ACK test passes.  So
	   does the sequence test; the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The probability is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK of the
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not; hence, we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt,
					  tcp_rsk(req)->rcv_nxt +
					  tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK
	 * flag is set.  If it is not, silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACKs. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid: create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
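
/*
 * Editor's sketch of the calling convention (simplified from the IPv4
 * receive path; treat the names and error handling as assumptions):
 *
 *	bool req_stolen = false;
 *	struct sock *nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
 *
 *	if (!nsk) {
 *		// segment ignored or answered; if req_stolen, another
 *		// CPU took ownership of the request
 *	} else if (nsk == sk) {
 *		// unacceptable ACK: let the listener path answer it
 *	} else {
 *		// handshake complete: feed this same segment to the
 *		// child, e.g. via tcp_child_process()
 *	}
 */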

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wake up the parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table, and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);
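
/*
 * Editor's sketch (simplified from the IPv4 receive path, so a sketch
 * under assumptions rather than the exact caller): when tcp_check_req()
 * returns a new child socket, the same segment is routed through it:
 *
 *	reason = tcp_child_process(sk, nsk, skb);
 *	if (reason)
 *		// the child dropped the segment; the caller may answer
 *		// with a reset and free the skb
 */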