xref: /linux/net/ipv4/tcp_ipv4.c (revision ead751507de86d90fa250431e9990a8b881f713c)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year in a coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77 
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 #include <linux/inetdevice.h>
84 
85 #include <crypto/hash.h>
86 #include <linux/scatterlist.h>
87 
88 #ifdef CONFIG_TCP_MD5SIG
89 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
90 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
91 #endif
92 
93 struct inet_hashinfo tcp_hashinfo;
94 EXPORT_SYMBOL(tcp_hashinfo);
95 
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
97 {
98 	return secure_tcp_seq(ip_hdr(skb)->daddr,
99 			      ip_hdr(skb)->saddr,
100 			      tcp_hdr(skb)->dest,
101 			      tcp_hdr(skb)->source);
102 }
103 
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
105 {
106 	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
107 }
108 
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 	struct tcp_sock *tp = tcp_sk(sk);
113 
114 	/* With PAWS, it is safe from the viewpoint
115 	   of data integrity. Even without PAWS it is safe provided sequence
116 	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
117 
118 	   Actually, the idea is close to VJ's: the only difference is that
119 	   the timestamp cache is held not per host but per port pair, and
120 	   the TW bucket is used as the state holder.
121 
122 	   If the TW bucket has already been destroyed we fall back to VJ's
123 	   scheme and use the initial timestamp retrieved from the peer table.
124 	 */
125 	if (tcptw->tw_ts_recent_stamp &&
126 	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
127 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 		if (tp->write_seq == 0)
130 			tp->write_seq = 1;
131 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 		sock_hold(sktw);
134 		return 1;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
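
The write_seq chosen above only has to clear the old connection's sequence space: tw_snd_nxt + 65535 + 2 jumps past a maximal window plus the SYN and FIN slots, and 0 is avoided because it doubles as the "unset" marker. A minimal sketch of the same arithmetic, outside the kernel:

	#include <stdint.h>

	/* Initial send sequence for a connection reusing a TIME-WAIT port
	 * pair: skip beyond the old connection's largest possible window
	 * (65535) plus two, and never return 0 (0 means "unset" above).
	 */
	static uint32_t reuse_write_seq(uint32_t tw_snd_nxt)
	{
		uint32_t seq = tw_snd_nxt + 65535 + 2;

		return seq ? seq : 1;
	}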
140 
141 /* This will initiate an outgoing connection. */
142 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143 {
144 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
145 	struct inet_sock *inet = inet_sk(sk);
146 	struct tcp_sock *tp = tcp_sk(sk);
147 	__be16 orig_sport, orig_dport;
148 	__be32 daddr, nexthop;
149 	struct flowi4 *fl4;
150 	struct rtable *rt;
151 	int err;
152 	struct ip_options_rcu *inet_opt;
153 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
154 
155 	if (addr_len < sizeof(struct sockaddr_in))
156 		return -EINVAL;
157 
158 	if (usin->sin_family != AF_INET)
159 		return -EAFNOSUPPORT;
160 
161 	nexthop = daddr = usin->sin_addr.s_addr;
162 	inet_opt = rcu_dereference_protected(inet->inet_opt,
163 					     lockdep_sock_is_held(sk));
164 	if (inet_opt && inet_opt->opt.srr) {
165 		if (!daddr)
166 			return -EINVAL;
167 		nexthop = inet_opt->opt.faddr;
168 	}
169 
170 	orig_sport = inet->inet_sport;
171 	orig_dport = usin->sin_port;
172 	fl4 = &inet->cork.fl.u.ip4;
173 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 			      IPPROTO_TCP,
176 			      orig_sport, orig_dport, sk);
177 	if (IS_ERR(rt)) {
178 		err = PTR_ERR(rt);
179 		if (err == -ENETUNREACH)
180 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
181 		return err;
182 	}
183 
184 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 		ip_rt_put(rt);
186 		return -ENETUNREACH;
187 	}
188 
189 	if (!inet_opt || !inet_opt->opt.srr)
190 		daddr = fl4->daddr;
191 
192 	if (!inet->inet_saddr)
193 		inet->inet_saddr = fl4->saddr;
194 	sk_rcv_saddr_set(sk, inet->inet_saddr);
195 
196 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197 		/* Reset inherited state */
198 		tp->rx_opt.ts_recent	   = 0;
199 		tp->rx_opt.ts_recent_stamp = 0;
200 		if (likely(!tp->repair))
201 			tp->write_seq	   = 0;
202 	}
203 
204 	inet->inet_dport = usin->sin_port;
205 	sk_daddr_set(sk, daddr);
206 
207 	inet_csk(sk)->icsk_ext_hdr_len = 0;
208 	if (inet_opt)
209 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
210 
211 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
212 
213 	/* Socket identity is still unknown (sport may be zero).
214 	 * However we set the state to SYN-SENT and, without releasing the
215 	 * socket lock, select a source port, enter ourselves into the hash
216 	 * tables and complete initialization after this.
217 	 */
218 	tcp_set_state(sk, TCP_SYN_SENT);
219 	err = inet_hash_connect(tcp_death_row, sk);
220 	if (err)
221 		goto failure;
222 
223 	sk_set_txhash(sk);
224 
225 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
226 			       inet->inet_sport, inet->inet_dport, sk);
227 	if (IS_ERR(rt)) {
228 		err = PTR_ERR(rt);
229 		rt = NULL;
230 		goto failure;
231 	}
232 	/* OK, now commit destination to socket.  */
233 	sk->sk_gso_type = SKB_GSO_TCPV4;
234 	sk_setup_caps(sk, &rt->dst);
235 	rt = NULL;
236 
237 	if (likely(!tp->repair)) {
238 		if (!tp->write_seq)
239 			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
240 						       inet->inet_daddr,
241 						       inet->inet_sport,
242 						       usin->sin_port);
243 		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
244 						 inet->inet_saddr,
245 						 inet->inet_daddr);
246 	}
247 
248 	inet->inet_id = tp->write_seq ^ jiffies;
249 
250 	if (tcp_fastopen_defer_connect(sk, &err))
251 		return err;
252 	if (err)
253 		goto failure;
254 
255 	err = tcp_connect(sk);
256 
257 	if (err)
258 		goto failure;
259 
260 	return 0;
261 
262 failure:
263 	/*
264 	 * This unhashes the socket and releases the local port,
265 	 * if necessary.
266 	 */
267 	tcp_set_state(sk, TCP_CLOSE);
268 	ip_rt_put(rt);
269 	sk->sk_route_caps = 0;
270 	inet->inet_dport = 0;
271 	return err;
272 }
273 EXPORT_SYMBOL(tcp_v4_connect);
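
For orientation, this is the function a plain userspace connect() on an AF_INET stream socket ends up in. A minimal, hypothetical caller (documentation address; only basic error handling):

	#include <arpa/inet.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in dst;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return 1;
		memset(&dst, 0, sizeof(dst));
		dst.sin_family = AF_INET;	/* anything else gets -EAFNOSUPPORT */
		dst.sin_port   = htons(80);
		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
		/* addr_len must cover a sockaddr_in, or the kernel returns -EINVAL */
		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
			perror("connect");
		close(fd);
		return 0;
	}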
274 
275 /*
276  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
277  * It can be called through tcp_release_cb() if the socket was owned by
278  * the user at the time tcp_v4_err() was called to handle the ICMP message.
279  */
280 void tcp_v4_mtu_reduced(struct sock *sk)
281 {
282 	struct inet_sock *inet = inet_sk(sk);
283 	struct dst_entry *dst;
284 	u32 mtu;
285 
286 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
287 		return;
288 	mtu = tcp_sk(sk)->mtu_info;
289 	dst = inet_csk_update_pmtu(sk, mtu);
290 	if (!dst)
291 		return;
292 
293 	/* Something is about to go wrong... Remember the soft error
294 	 * in case this connection is not able to recover.
295 	 */
296 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
297 		sk->sk_err_soft = EMSGSIZE;
298 
299 	mtu = dst_mtu(dst);
300 
301 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
302 	    ip_sk_accept_pmtu(sk) &&
303 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
304 		tcp_sync_mss(sk, mtu);
305 
306 		/* Resend the TCP packet because it's
307 		 * clear that the old packet has been
308 		 * dropped. This is the new "fast" path mtu
309 		 * discovery.
310 		 */
311 		tcp_simple_retransmit(sk);
312 	} /* else let the usual retransmit timer handle it */
313 }
314 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
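
The clamping done here is visible from userspace: once an ICMP_FRAG_NEEDED has been processed for a connection, the IP_MTU socket option reports the (possibly reduced) cached path MTU. A small sketch, assuming fd is a connected TCP socket:

	#include <netinet/in.h>
	#include <stdio.h>
	#include <sys/socket.h>

	/* Print the path MTU the kernel currently caches for this socket;
	 * it reflects any reduction applied by tcp_v4_mtu_reduced().
	 */
	static void print_path_mtu(int fd)
	{
		int mtu;
		socklen_t len = sizeof(mtu);

		if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
			printf("path mtu: %d\n", mtu);
		else
			perror("getsockopt(IP_MTU)");
	}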
315 
316 static void do_redirect(struct sk_buff *skb, struct sock *sk)
317 {
318 	struct dst_entry *dst = __sk_dst_check(sk, 0);
319 
320 	if (dst)
321 		dst->ops->redirect(dst, sk, skb);
322 }
323 
324 
325 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
326 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
327 {
328 	struct request_sock *req = inet_reqsk(sk);
329 	struct net *net = sock_net(sk);
330 
331 	/* ICMPs are not backlogged, hence we cannot get
332 	 * an established socket here.
333 	 */
334 	if (seq != tcp_rsk(req)->snt_isn) {
335 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
336 	} else if (abort) {
337 		/*
338 		 * Still in SYN_RECV, just remove it silently.
339 		 * There is no good way to pass the error to the newly
340 		 * created socket, and POSIX does not want network
341 		 * errors returned from accept().
342 		 */
343 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
344 		tcp_listendrop(req->rsk_listener);
345 	}
346 	reqsk_put(req);
347 }
348 EXPORT_SYMBOL(tcp_req_err);
349 
350 /*
351  * This routine is called by the ICMP module when it gets some
352  * sort of error condition.  If err < 0 then the socket should
353  * be closed and the error returned to the user.  If err > 0
354  * it's just the icmp type << 8 | icmp code.  After adjustment, the
355  * header points to the first 8 bytes of the tcp header.  We need
356  * to find the appropriate port.
357  *
358  * The locking strategy used here is very "optimistic". When
359  * someone else accesses the socket the ICMP is just dropped
360  * and for some paths there is no check at all.
361  * A more general error queue to queue errors for later handling
362  * is probably better.
363  *
364  */
365 
366 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
367 {
368 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
369 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
370 	struct inet_connection_sock *icsk;
371 	struct tcp_sock *tp;
372 	struct inet_sock *inet;
373 	const int type = icmp_hdr(icmp_skb)->type;
374 	const int code = icmp_hdr(icmp_skb)->code;
375 	struct sock *sk;
376 	struct sk_buff *skb;
377 	struct request_sock *fastopen;
378 	u32 seq, snd_una;
379 	s32 remaining;
380 	u32 delta_us;
381 	int err;
382 	struct net *net = dev_net(icmp_skb->dev);
383 
384 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
385 				       th->dest, iph->saddr, ntohs(th->source),
386 				       inet_iif(icmp_skb), 0);
387 	if (!sk) {
388 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
389 		return;
390 	}
391 	if (sk->sk_state == TCP_TIME_WAIT) {
392 		inet_twsk_put(inet_twsk(sk));
393 		return;
394 	}
395 	seq = ntohl(th->seq);
396 	if (sk->sk_state == TCP_NEW_SYN_RECV)
397 		return tcp_req_err(sk, seq,
398 				  type == ICMP_PARAMETERPROB ||
399 				  type == ICMP_TIME_EXCEEDED ||
400 				  (type == ICMP_DEST_UNREACH &&
401 				   (code == ICMP_NET_UNREACH ||
402 				    code == ICMP_HOST_UNREACH)));
403 
404 	bh_lock_sock(sk);
405 	/* If too many ICMPs get dropped on busy
406 	 * servers this needs to be solved differently.
407 	 * We do take care of the PMTU discovery (RFC1191) special case:
408 	 * we can receive locally generated ICMP messages while the socket is held.
409 	 */
410 	if (sock_owned_by_user(sk)) {
411 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
412 			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
413 	}
414 	if (sk->sk_state == TCP_CLOSE)
415 		goto out;
416 
417 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
418 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
419 		goto out;
420 	}
421 
422 	icsk = inet_csk(sk);
423 	tp = tcp_sk(sk);
424 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
425 	fastopen = tp->fastopen_rsk;
426 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
427 	if (sk->sk_state != TCP_LISTEN &&
428 	    !between(seq, snd_una, tp->snd_nxt)) {
429 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
430 		goto out;
431 	}
432 
433 	switch (type) {
434 	case ICMP_REDIRECT:
435 		if (!sock_owned_by_user(sk))
436 			do_redirect(icmp_skb, sk);
437 		goto out;
438 	case ICMP_SOURCE_QUENCH:
439 		/* Just silently ignore these. */
440 		goto out;
441 	case ICMP_PARAMETERPROB:
442 		err = EPROTO;
443 		break;
444 	case ICMP_DEST_UNREACH:
445 		if (code > NR_ICMP_UNREACH)
446 			goto out;
447 
448 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
449 			/* We are not interested in TCP_LISTEN and open_requests
450 			 * (SYN-ACKs sent out by Linux are always <576 bytes so
451 			 * they should go through unfragmented).
452 			 */
453 			if (sk->sk_state == TCP_LISTEN)
454 				goto out;
455 
456 			tp->mtu_info = info;
457 			if (!sock_owned_by_user(sk)) {
458 				tcp_v4_mtu_reduced(sk);
459 			} else {
460 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
461 					sock_hold(sk);
462 			}
463 			goto out;
464 		}
465 
466 		err = icmp_err_convert[code].errno;
467 		/* check if icmp_skb allows revert of backoff
468 		 * (see draft-zimmermann-tcp-lcd) */
469 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
470 			break;
471 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
472 		    !icsk->icsk_backoff || fastopen)
473 			break;
474 
475 		if (sock_owned_by_user(sk))
476 			break;
477 
478 		icsk->icsk_backoff--;
479 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
480 					       TCP_TIMEOUT_INIT;
481 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
482 
483 		skb = tcp_write_queue_head(sk);
484 		BUG_ON(!skb);
485 
486 		tcp_mstamp_refresh(tp);
487 		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
488 		remaining = icsk->icsk_rto -
489 			    usecs_to_jiffies(delta_us);
490 
491 		if (remaining > 0) {
492 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
493 						  remaining, TCP_RTO_MAX);
494 		} else {
495 			/* The reverted RTO has already expired;
496 			 * retransmit now. */
497 			tcp_retransmit_timer(sk);
498 		}
499 
500 		break;
501 	case ICMP_TIME_EXCEEDED:
502 		err = EHOSTUNREACH;
503 		break;
504 	default:
505 		goto out;
506 	}
507 
508 	switch (sk->sk_state) {
509 	case TCP_SYN_SENT:
510 	case TCP_SYN_RECV:
511 		/* Only in fast or simultaneous open. If a fast open socket
512 		 * is already accepted it is treated as a connected one below.
513 		 */
514 		if (fastopen && !fastopen->sk)
515 			break;
516 
517 		if (!sock_owned_by_user(sk)) {
518 			sk->sk_err = err;
519 
520 			sk->sk_error_report(sk);
521 
522 			tcp_done(sk);
523 		} else {
524 			sk->sk_err_soft = err;
525 		}
526 		goto out;
527 	}
528 
529 	/* If we've already connected we will keep trying
530 	 * until we time out, or the user gives up.
531 	 *
532 	 * rfc1122 4.2.3.9 allows us to consider as hard errors
533 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
534 	 * but it is obsoleted by pmtu discovery).
535 	 *
536 	 * Note that on the modern internet, where routing is unreliable
537 	 * and broken firewalls sit in every dark corner sending random
538 	 * errors as ordered by their masters, even these two messages
539 	 * finally lose their original sense (even Linux sends invalid PORT_UNREACHs).
540 	 *
541 	 * Now we are in compliance with RFCs.
542 	 *							--ANK (980905)
543 	 */
544 
545 	inet = inet_sk(sk);
546 	if (!sock_owned_by_user(sk) && inet->recverr) {
547 		sk->sk_err = err;
548 		sk->sk_error_report(sk);
549 	} else	{ /* Only an error on timeout */
550 		sk->sk_err_soft = err;
551 	}
552 
553 out:
554 	bh_unlock_sock(sk);
555 	sock_put(sk);
556 }
557 
558 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
559 {
560 	struct tcphdr *th = tcp_hdr(skb);
561 
562 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
563 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
564 		skb->csum_start = skb_transport_header(skb) - skb->head;
565 		skb->csum_offset = offsetof(struct tcphdr, check);
566 	} else {
567 		th->check = tcp_v4_check(skb->len, saddr, daddr,
568 					 csum_partial(th,
569 						      th->doff << 2,
570 						      skb->csum));
571 	}
572 }
573 
574 /* This routine computes an IPv4 TCP checksum. */
575 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
576 {
577 	const struct inet_sock *inet = inet_sk(sk);
578 
579 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
580 }
581 EXPORT_SYMBOL(tcp_v4_send_check);
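
The checksum being filled in is the ordinary RFC 1071 one's-complement sum over the IPv4 pseudo header plus the TCP segment. A kernel-independent sketch of the same fold (the helpers below are made up for illustration; tcp_v4_check()/csum_partial() are the real thing):

	#include <stddef.h>
	#include <stdint.h>

	/* One's-complement sum of 16-bit big-endian words, unfolded. */
	static uint32_t csum16(const uint8_t *p, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(p[i] << 8 | p[i + 1]);
		if (len & 1)
			sum += (uint32_t)(p[len - 1] << 8);	/* pad odd byte */
		return sum;
	}

	/* RFC 1071 checksum of pseudo header + segment; saddr/daddr are the
	 * raw 4-byte addresses exactly as they appear on the wire.
	 */
	static uint16_t tcp4_checksum(const uint8_t saddr[4],
				      const uint8_t daddr[4],
				      const uint8_t *seg, size_t len)
	{
		uint64_t sum = 0;

		sum += csum16(saddr, 4) + csum16(daddr, 4);
		sum += 6 + len;		/* IPPROTO_TCP and the TCP length */
		sum += csum16(seg, len);
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}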
582 
583 /*
584  *	This routine will send an RST to the other tcp.
585  *
586  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
587  *		      for the reset?
588  *	Answer: if a packet caused an RST, it is not for a socket
589  *		existing in our system; if it is matched to a socket,
590  *		it is just a duplicate segment or a bug in the other side's
591  *		TCP. So we build the reply based only on the parameters
592  *		that arrived with the segment.
593  *	Exception: precedence violation. We do not implement it in any case.
594  */
595 
596 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
597 {
598 	const struct tcphdr *th = tcp_hdr(skb);
599 	struct {
600 		struct tcphdr th;
601 #ifdef CONFIG_TCP_MD5SIG
602 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
603 #endif
604 	} rep;
605 	struct ip_reply_arg arg;
606 #ifdef CONFIG_TCP_MD5SIG
607 	struct tcp_md5sig_key *key = NULL;
608 	const __u8 *hash_location = NULL;
609 	unsigned char newhash[16];
610 	int genhash;
611 	struct sock *sk1 = NULL;
612 #endif
613 	struct net *net;
614 
615 	/* Never send a reset in response to a reset. */
616 	if (th->rst)
617 		return;
618 
619 	/* If sk is not NULL, it means we did a successful lookup and the
620 	 * incoming route had to be correct. The prequeue might have dropped our dst.
621 	 */
622 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
623 		return;
624 
625 	/* Swap the send and the receive. */
626 	memset(&rep, 0, sizeof(rep));
627 	rep.th.dest   = th->source;
628 	rep.th.source = th->dest;
629 	rep.th.doff   = sizeof(struct tcphdr) / 4;
630 	rep.th.rst    = 1;
631 
632 	if (th->ack) {
633 		rep.th.seq = th->ack_seq;
634 	} else {
635 		rep.th.ack = 1;
636 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
637 				       skb->len - (th->doff << 2));
638 	}
639 
640 	memset(&arg, 0, sizeof(arg));
641 	arg.iov[0].iov_base = (unsigned char *)&rep;
642 	arg.iov[0].iov_len  = sizeof(rep.th);
643 
644 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
645 #ifdef CONFIG_TCP_MD5SIG
646 	rcu_read_lock();
647 	hash_location = tcp_parse_md5sig_option(th);
648 	if (sk && sk_fullsock(sk)) {
649 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
650 					&ip_hdr(skb)->saddr, AF_INET);
651 	} else if (hash_location) {
652 		/*
653 		 * The active side is lost. Try to find the listening socket
654 		 * through the source port, and then find the md5 key through
655 		 * the listening socket. We are not losing security here:
656 		 * the incoming packet is checked against the md5 hash of the
657 		 * found key; no RST is generated if the md5 hash doesn't match.
658 		 */
659 		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
660 					     ip_hdr(skb)->saddr,
661 					     th->source, ip_hdr(skb)->daddr,
662 					     ntohs(th->source), inet_iif(skb),
663 					     tcp_v4_sdif(skb));
664 		/* don't send an rst if we can't find the key */
665 		if (!sk1)
666 			goto out;
667 
668 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
669 					&ip_hdr(skb)->saddr, AF_INET);
670 		if (!key)
671 			goto out;
672 
673 
674 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
675 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
676 			goto out;
677 
678 	}
679 
680 	if (key) {
681 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
682 				   (TCPOPT_NOP << 16) |
683 				   (TCPOPT_MD5SIG << 8) |
684 				   TCPOLEN_MD5SIG);
685 		/* Update the length, and the length the header thinks it has */
686 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
687 		rep.th.doff = arg.iov[0].iov_len / 4;
688 
689 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
690 				     key, ip_hdr(skb)->saddr,
691 				     ip_hdr(skb)->daddr, &rep.th);
692 	}
693 #endif
694 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
695 				      ip_hdr(skb)->saddr, /* XXX */
696 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
697 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
698 	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
699 
700 	/* When the socket is gone, all binding information is lost and
701 	 * routing might fail. No choice here: if we force the input
702 	 * interface, we will misroute in the case of an asymmetric route.
703 	 */
704 	if (sk)
705 		arg.bound_dev_if = sk->sk_bound_dev_if;
706 
707 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
708 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
709 
710 	arg.tos = ip_hdr(skb)->tos;
711 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
712 	local_bh_disable();
713 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
714 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
715 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
716 			      &arg, arg.iov[0].iov_len);
717 
718 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
719 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
720 	local_bh_enable();
721 
722 #ifdef CONFIG_TCP_MD5SIG
723 out:
724 	rcu_read_unlock();
725 #endif
726 }
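
The ack_seq picked above for resetting a non-ACK segment is just the next sequence number the peer would expect, with SYN and FIN each counting as one octet of sequence space. The same arithmetic, stripped of byte-order conversions:

	#include <stdint.h>

	/* RST acknowledgment for a segment that carried no ACK: cover the
	 * whole sequence space the segment consumed (SYN and FIN count as
	 * one each, payload counts its length).  E.g. a bare SYN with
	 * seq 1000 is answered with ack_seq 1001.
	 */
	static uint32_t rst_ack_seq(uint32_t seq, int syn, int fin,
				    uint32_t payload_len)
	{
		return seq + syn + fin + payload_len;
	}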
727 
728 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
729    outside socket context, is certainly ugly. What can I do?
730  */
731 
732 static void tcp_v4_send_ack(const struct sock *sk,
733 			    struct sk_buff *skb, u32 seq, u32 ack,
734 			    u32 win, u32 tsval, u32 tsecr, int oif,
735 			    struct tcp_md5sig_key *key,
736 			    int reply_flags, u8 tos)
737 {
738 	const struct tcphdr *th = tcp_hdr(skb);
739 	struct {
740 		struct tcphdr th;
741 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
742 #ifdef CONFIG_TCP_MD5SIG
743 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
744 #endif
745 			];
746 	} rep;
747 	struct net *net = sock_net(sk);
748 	struct ip_reply_arg arg;
749 
750 	memset(&rep.th, 0, sizeof(struct tcphdr));
751 	memset(&arg, 0, sizeof(arg));
752 
753 	arg.iov[0].iov_base = (unsigned char *)&rep;
754 	arg.iov[0].iov_len  = sizeof(rep.th);
755 	if (tsecr) {
756 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
757 				   (TCPOPT_TIMESTAMP << 8) |
758 				   TCPOLEN_TIMESTAMP);
759 		rep.opt[1] = htonl(tsval);
760 		rep.opt[2] = htonl(tsecr);
761 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
762 	}
763 
764 	/* Swap the send and the receive. */
765 	rep.th.dest    = th->source;
766 	rep.th.source  = th->dest;
767 	rep.th.doff    = arg.iov[0].iov_len / 4;
768 	rep.th.seq     = htonl(seq);
769 	rep.th.ack_seq = htonl(ack);
770 	rep.th.ack     = 1;
771 	rep.th.window  = htons(win);
772 
773 #ifdef CONFIG_TCP_MD5SIG
774 	if (key) {
775 		int offset = (tsecr) ? 3 : 0;
776 
777 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
778 					  (TCPOPT_NOP << 16) |
779 					  (TCPOPT_MD5SIG << 8) |
780 					  TCPOLEN_MD5SIG);
781 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
782 		rep.th.doff = arg.iov[0].iov_len/4;
783 
784 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
785 				    key, ip_hdr(skb)->saddr,
786 				    ip_hdr(skb)->daddr, &rep.th);
787 	}
788 #endif
789 	arg.flags = reply_flags;
790 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
791 				      ip_hdr(skb)->saddr, /* XXX */
792 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
793 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
794 	if (oif)
795 		arg.bound_dev_if = oif;
796 	arg.tos = tos;
797 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
798 	local_bh_disable();
799 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
800 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
801 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
802 			      &arg, arg.iov[0].iov_len);
803 
804 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
805 	local_bh_enable();
806 }
807 
808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
809 {
810 	struct inet_timewait_sock *tw = inet_twsk(sk);
811 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
812 
813 	tcp_v4_send_ack(sk, skb,
814 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
815 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
816 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
817 			tcptw->tw_ts_recent,
818 			tw->tw_bound_dev_if,
819 			tcp_twsk_md5_key(tcptw),
820 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
821 			tw->tw_tos
822 			);
823 
824 	inet_twsk_put(tw);
825 }
826 
827 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
828 				  struct request_sock *req)
829 {
830 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
831 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
832 	 */
833 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
834 					     tcp_sk(sk)->snd_nxt;
835 
836 	/* RFC 7323 2.3
837 	 * The window field (SEG.WND) of every outgoing segment, with the
838 	 * exception of <SYN> segments, MUST be right-shifted by
839 	 * Rcv.Wind.Shift bits:
840 	 */
841 	tcp_v4_send_ack(sk, skb, seq,
842 			tcp_rsk(req)->rcv_nxt,
843 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
844 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
845 			req->ts_recent,
846 			0,
847 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
848 					  AF_INET),
849 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
850 			ip_hdr(skb)->tos);
851 }
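
The RFC 7323 right-shift cited above is plain integer arithmetic; for example, an advertised receive window of 131072 bytes with Rcv.Wind.Shift = 7 puts 1024 in the 16-bit window field. As a sketch:

	#include <stdint.h>

	/* RFC 7323 2.3: every non-SYN segment carries the receive window
	 * right-shifted by the negotiated receive window scale.
	 */
	static uint16_t scaled_window(uint32_t rcv_wnd, uint8_t rcv_wscale)
	{
		return (uint16_t)(rcv_wnd >> rcv_wscale);  /* 131072 >> 7 == 1024 */
	}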
852 
853 /*
854  *	Send a SYN-ACK after having received a SYN.
855  *	This still operates on a request_sock only, not on a big
856  *	socket.
857  */
858 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
859 			      struct flowi *fl,
860 			      struct request_sock *req,
861 			      struct tcp_fastopen_cookie *foc,
862 			      enum tcp_synack_type synack_type)
863 {
864 	const struct inet_request_sock *ireq = inet_rsk(req);
865 	struct flowi4 fl4;
866 	int err = -1;
867 	struct sk_buff *skb;
868 
869 	/* First, grab a route. */
870 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
871 		return -1;
872 
873 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
874 
875 	if (skb) {
876 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
877 
878 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 					    ireq->ir_rmt_addr,
880 					    ireq_opt_deref(ireq));
881 		err = net_xmit_eval(err);
882 	}
883 
884 	return err;
885 }
886 
887 /*
888  *	IPv4 request_sock destructor.
889  */
890 static void tcp_v4_reqsk_destructor(struct request_sock *req)
891 {
892 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
893 }
894 
895 #ifdef CONFIG_TCP_MD5SIG
896 /*
897  * RFC2385 MD5 checksumming requires a mapping of
898  * IP address->MD5 Key.
899  * We need to maintain these in the sk structure.
900  */
901 
902 /* Find the Key structure for an address.  */
903 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
904 					 const union tcp_md5_addr *addr,
905 					 int family)
906 {
907 	const struct tcp_sock *tp = tcp_sk(sk);
908 	struct tcp_md5sig_key *key;
909 	const struct tcp_md5sig_info *md5sig;
910 	__be32 mask;
911 	struct tcp_md5sig_key *best_match = NULL;
912 	bool match;
913 
914 	/* caller either holds rcu_read_lock() or socket lock */
915 	md5sig = rcu_dereference_check(tp->md5sig_info,
916 				       lockdep_sock_is_held(sk));
917 	if (!md5sig)
918 		return NULL;
919 
920 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
921 		if (key->family != family)
922 			continue;
923 
924 		if (family == AF_INET) {
925 			mask = inet_make_mask(key->prefixlen);
926 			match = (key->addr.a4.s_addr & mask) ==
927 				(addr->a4.s_addr & mask);
928 #if IS_ENABLED(CONFIG_IPV6)
929 		} else if (family == AF_INET6) {
930 			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
931 						  key->prefixlen);
932 #endif
933 		} else {
934 			match = false;
935 		}
936 
937 		if (match && (!best_match ||
938 			      key->prefixlen > best_match->prefixlen))
939 			best_match = key;
940 	}
941 	return best_match;
942 }
943 EXPORT_SYMBOL(tcp_md5_do_lookup);
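
The loop above implements a longest-prefix match: a /32 key for the exact peer wins over a /24 key that also covers it. The IPv4 matching rule, restated outside the kernel (make_mask() here mirrors inet_make_mask()):

	#include <arpa/inet.h>
	#include <stdint.h>

	/* Network-byte-order netmask for a prefix length, like
	 * inet_make_mask(); e.g. 24 -> 0xffffff00 in host order.
	 */
	static uint32_t make_mask(uint8_t prefixlen)
	{
		return prefixlen ? htonl(~0u << (32 - prefixlen)) : 0;
	}

	/* Does the key covering key_addr/prefixlen match addr?
	 * Both addresses are in network byte order.
	 */
	static int prefix_match(uint32_t key_addr, uint8_t prefixlen,
				uint32_t addr)
	{
		uint32_t mask = make_mask(prefixlen);

		return (key_addr & mask) == (addr & mask);
	}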
944 
945 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
946 						      const union tcp_md5_addr *addr,
947 						      int family, u8 prefixlen)
948 {
949 	const struct tcp_sock *tp = tcp_sk(sk);
950 	struct tcp_md5sig_key *key;
951 	unsigned int size = sizeof(struct in_addr);
952 	const struct tcp_md5sig_info *md5sig;
953 
954 	/* caller either holds rcu_read_lock() or socket lock */
955 	md5sig = rcu_dereference_check(tp->md5sig_info,
956 				       lockdep_sock_is_held(sk));
957 	if (!md5sig)
958 		return NULL;
959 #if IS_ENABLED(CONFIG_IPV6)
960 	if (family == AF_INET6)
961 		size = sizeof(struct in6_addr);
962 #endif
963 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
964 		if (key->family != family)
965 			continue;
966 		if (!memcmp(&key->addr, addr, size) &&
967 		    key->prefixlen == prefixlen)
968 			return key;
969 	}
970 	return NULL;
971 }
972 
973 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
974 					 const struct sock *addr_sk)
975 {
976 	const union tcp_md5_addr *addr;
977 
978 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
979 	return tcp_md5_do_lookup(sk, addr, AF_INET);
980 }
981 EXPORT_SYMBOL(tcp_v4_md5_lookup);
982 
983 /* This can be called on a newly created socket, from other files */
984 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
985 		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
986 		   gfp_t gfp)
987 {
988 	/* Add Key to the list */
989 	struct tcp_md5sig_key *key;
990 	struct tcp_sock *tp = tcp_sk(sk);
991 	struct tcp_md5sig_info *md5sig;
992 
993 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
994 	if (key) {
995 		/* Pre-existing entry - just update that one. */
996 		memcpy(key->key, newkey, newkeylen);
997 		key->keylen = newkeylen;
998 		return 0;
999 	}
1000 
1001 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1002 					   lockdep_sock_is_held(sk));
1003 	if (!md5sig) {
1004 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1005 		if (!md5sig)
1006 			return -ENOMEM;
1007 
1008 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1009 		INIT_HLIST_HEAD(&md5sig->head);
1010 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1011 	}
1012 
1013 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1014 	if (!key)
1015 		return -ENOMEM;
1016 	if (!tcp_alloc_md5sig_pool()) {
1017 		sock_kfree_s(sk, key, sizeof(*key));
1018 		return -ENOMEM;
1019 	}
1020 
1021 	memcpy(key->key, newkey, newkeylen);
1022 	key->keylen = newkeylen;
1023 	key->family = family;
1024 	key->prefixlen = prefixlen;
1025 	memcpy(&key->addr, addr,
1026 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1027 				      sizeof(struct in_addr));
1028 	hlist_add_head_rcu(&key->node, &md5sig->head);
1029 	return 0;
1030 }
1031 EXPORT_SYMBOL(tcp_md5_do_add);
1032 
1033 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1034 		   u8 prefixlen)
1035 {
1036 	struct tcp_md5sig_key *key;
1037 
1038 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1039 	if (!key)
1040 		return -ENOENT;
1041 	hlist_del_rcu(&key->node);
1042 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1043 	kfree_rcu(key, rcu);
1044 	return 0;
1045 }
1046 EXPORT_SYMBOL(tcp_md5_do_del);
1047 
1048 static void tcp_clear_md5_list(struct sock *sk)
1049 {
1050 	struct tcp_sock *tp = tcp_sk(sk);
1051 	struct tcp_md5sig_key *key;
1052 	struct hlist_node *n;
1053 	struct tcp_md5sig_info *md5sig;
1054 
1055 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1056 
1057 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1058 		hlist_del_rcu(&key->node);
1059 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1060 		kfree_rcu(key, rcu);
1061 	}
1062 }
1063 
1064 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1065 				 char __user *optval, int optlen)
1066 {
1067 	struct tcp_md5sig cmd;
1068 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1069 	u8 prefixlen = 32;
1070 
1071 	if (optlen < sizeof(cmd))
1072 		return -EINVAL;
1073 
1074 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1075 		return -EFAULT;
1076 
1077 	if (sin->sin_family != AF_INET)
1078 		return -EINVAL;
1079 
1080 	if (optname == TCP_MD5SIG_EXT &&
1081 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1082 		prefixlen = cmd.tcpm_prefixlen;
1083 		if (prefixlen > 32)
1084 			return -EINVAL;
1085 	}
1086 
1087 	if (!cmd.tcpm_keylen)
1088 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1089 				      AF_INET, prefixlen);
1090 
1091 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1092 		return -EINVAL;
1093 
1094 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1095 			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1096 			      GFP_KERNEL);
1097 }
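
The userspace side of this parser is the TCP_MD5SIG (and, with a prefix length, TCP_MD5SIG_EXT) socket option. A minimal sketch installing a key for one peer; the key bytes and peer address are placeholders:

	#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */
	#include <netinet/in.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Install an RFC 2385 MD5 key for segments exchanged with 'peer'.
	 * A tcpm_keylen of 0 would instead delete the key (the parser
	 * above maps that case to tcp_md5_do_del()).
	 */
	static int install_md5_key(int fd, const struct sockaddr_in *peer,
				   const void *key, int keylen)
	{
		struct tcp_md5sig md5;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = keylen;	/* <= TCP_MD5SIG_MAXKEYLEN */
		memcpy(md5.tcpm_key, key, keylen);
		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
				  &md5, sizeof(md5));
	}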
1098 
1099 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1100 				   __be32 daddr, __be32 saddr,
1101 				   const struct tcphdr *th, int nbytes)
1102 {
1103 	struct tcp4_pseudohdr *bp;
1104 	struct scatterlist sg;
1105 	struct tcphdr *_th;
1106 
1107 	bp = hp->scratch;
1108 	bp->saddr = saddr;
1109 	bp->daddr = daddr;
1110 	bp->pad = 0;
1111 	bp->protocol = IPPROTO_TCP;
1112 	bp->len = cpu_to_be16(nbytes);
1113 
1114 	_th = (struct tcphdr *)(bp + 1);
1115 	memcpy(_th, th, sizeof(*th));
1116 	_th->check = 0;
1117 
1118 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1119 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1120 				sizeof(*bp) + sizeof(*th));
1121 	return crypto_ahash_update(hp->md5_req);
1122 }
1123 
1124 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1125 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1126 {
1127 	struct tcp_md5sig_pool *hp;
1128 	struct ahash_request *req;
1129 
1130 	hp = tcp_get_md5sig_pool();
1131 	if (!hp)
1132 		goto clear_hash_noput;
1133 	req = hp->md5_req;
1134 
1135 	if (crypto_ahash_init(req))
1136 		goto clear_hash;
1137 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1138 		goto clear_hash;
1139 	if (tcp_md5_hash_key(hp, key))
1140 		goto clear_hash;
1141 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1142 	if (crypto_ahash_final(req))
1143 		goto clear_hash;
1144 
1145 	tcp_put_md5sig_pool();
1146 	return 0;
1147 
1148 clear_hash:
1149 	tcp_put_md5sig_pool();
1150 clear_hash_noput:
1151 	memset(md5_hash, 0, 16);
1152 	return 1;
1153 }
1154 
1155 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1156 			const struct sock *sk,
1157 			const struct sk_buff *skb)
1158 {
1159 	struct tcp_md5sig_pool *hp;
1160 	struct ahash_request *req;
1161 	const struct tcphdr *th = tcp_hdr(skb);
1162 	__be32 saddr, daddr;
1163 
1164 	if (sk) { /* valid for establish/request sockets */
1165 		saddr = sk->sk_rcv_saddr;
1166 		daddr = sk->sk_daddr;
1167 	} else {
1168 		const struct iphdr *iph = ip_hdr(skb);
1169 		saddr = iph->saddr;
1170 		daddr = iph->daddr;
1171 	}
1172 
1173 	hp = tcp_get_md5sig_pool();
1174 	if (!hp)
1175 		goto clear_hash_noput;
1176 	req = hp->md5_req;
1177 
1178 	if (crypto_ahash_init(req))
1179 		goto clear_hash;
1180 
1181 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1182 		goto clear_hash;
1183 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 		goto clear_hash;
1185 	if (tcp_md5_hash_key(hp, key))
1186 		goto clear_hash;
1187 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1188 	if (crypto_ahash_final(req))
1189 		goto clear_hash;
1190 
1191 	tcp_put_md5sig_pool();
1192 	return 0;
1193 
1194 clear_hash:
1195 	tcp_put_md5sig_pool();
1196 clear_hash_noput:
1197 	memset(md5_hash, 0, 16);
1198 	return 1;
1199 }
1200 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1201 
1202 #endif
1203 
1204 /* Called with rcu_read_lock() */
1205 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1206 				    const struct sk_buff *skb)
1207 {
1208 #ifdef CONFIG_TCP_MD5SIG
1209 	/*
1210 	 * This gets called for each TCP segment that arrives
1211 	 * so we want to be efficient.
1212 	 * We have 3 drop cases:
1213 	 * o No MD5 hash and one expected.
1214 	 * o MD5 hash and we're not expecting one.
1215 	 * o MD5 hash and it's wrong.
1216 	 */
1217 	const __u8 *hash_location = NULL;
1218 	struct tcp_md5sig_key *hash_expected;
1219 	const struct iphdr *iph = ip_hdr(skb);
1220 	const struct tcphdr *th = tcp_hdr(skb);
1221 	int genhash;
1222 	unsigned char newhash[16];
1223 
1224 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1225 					  AF_INET);
1226 	hash_location = tcp_parse_md5sig_option(th);
1227 
1228 	/* We've parsed the options - do we have a hash? */
1229 	if (!hash_expected && !hash_location)
1230 		return false;
1231 
1232 	if (hash_expected && !hash_location) {
1233 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1234 		return true;
1235 	}
1236 
1237 	if (!hash_expected && hash_location) {
1238 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1239 		return true;
1240 	}
1241 
1242 	/* Okay, so we have both hash_expected and hash_location -
1243 	 * we need to calculate the checksum.
1244 	 */
1245 	genhash = tcp_v4_md5_hash_skb(newhash,
1246 				      hash_expected,
1247 				      NULL, skb);
1248 
1249 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1250 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1251 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1252 				     &iph->saddr, ntohs(th->source),
1253 				     &iph->daddr, ntohs(th->dest),
1254 				     genhash ? " tcp_v4_calc_md5_hash failed"
1255 				     : "");
1256 		return true;
1257 	}
1258 	return false;
1259 #endif
1260 	return false;
1261 }
1262 
1263 static void tcp_v4_init_req(struct request_sock *req,
1264 			    const struct sock *sk_listener,
1265 			    struct sk_buff *skb)
1266 {
1267 	struct inet_request_sock *ireq = inet_rsk(req);
1268 	struct net *net = sock_net(sk_listener);
1269 
1270 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1271 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1272 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1273 }
1274 
1275 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1276 					  struct flowi *fl,
1277 					  const struct request_sock *req)
1278 {
1279 	return inet_csk_route_req(sk, &fl->u.ip4, req);
1280 }
1281 
1282 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1283 	.family		=	PF_INET,
1284 	.obj_size	=	sizeof(struct tcp_request_sock),
1285 	.rtx_syn_ack	=	tcp_rtx_synack,
1286 	.send_ack	=	tcp_v4_reqsk_send_ack,
1287 	.destructor	=	tcp_v4_reqsk_destructor,
1288 	.send_reset	=	tcp_v4_send_reset,
1289 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1290 };
1291 
1292 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1293 	.mss_clamp	=	TCP_MSS_DEFAULT,
1294 #ifdef CONFIG_TCP_MD5SIG
1295 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1296 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1297 #endif
1298 	.init_req	=	tcp_v4_init_req,
1299 #ifdef CONFIG_SYN_COOKIES
1300 	.cookie_init_seq =	cookie_v4_init_sequence,
1301 #endif
1302 	.route_req	=	tcp_v4_route_req,
1303 	.init_seq	=	tcp_v4_init_seq,
1304 	.init_ts_off	=	tcp_v4_init_ts_off,
1305 	.send_synack	=	tcp_v4_send_synack,
1306 };
1307 
1308 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1309 {
1310 	/* Never answer SYNs sent to broadcast or multicast */
1311 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1312 		goto drop;
1313 
1314 	return tcp_conn_request(&tcp_request_sock_ops,
1315 				&tcp_request_sock_ipv4_ops, sk, skb);
1316 
1317 drop:
1318 	tcp_listendrop(sk);
1319 	return 0;
1320 }
1321 EXPORT_SYMBOL(tcp_v4_conn_request);
1322 
1323 
1324 /*
1325  * The three way handshake has completed - we got a valid synack -
1326  * now create the new socket.
1327  */
1328 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1329 				  struct request_sock *req,
1330 				  struct dst_entry *dst,
1331 				  struct request_sock *req_unhash,
1332 				  bool *own_req)
1333 {
1334 	struct inet_request_sock *ireq;
1335 	struct inet_sock *newinet;
1336 	struct tcp_sock *newtp;
1337 	struct sock *newsk;
1338 #ifdef CONFIG_TCP_MD5SIG
1339 	struct tcp_md5sig_key *key;
1340 #endif
1341 	struct ip_options_rcu *inet_opt;
1342 
1343 	if (sk_acceptq_is_full(sk))
1344 		goto exit_overflow;
1345 
1346 	newsk = tcp_create_openreq_child(sk, req, skb);
1347 	if (!newsk)
1348 		goto exit_nonewsk;
1349 
1350 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1351 	inet_sk_rx_dst_set(newsk, skb);
1352 
1353 	newtp		      = tcp_sk(newsk);
1354 	newinet		      = inet_sk(newsk);
1355 	ireq		      = inet_rsk(req);
1356 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1357 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1358 	newsk->sk_bound_dev_if = ireq->ir_iif;
1359 	newinet->inet_saddr   = ireq->ir_loc_addr;
1360 	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1361 	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1362 	newinet->mc_index     = inet_iif(skb);
1363 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1364 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1365 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1366 	if (inet_opt)
1367 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1368 	newinet->inet_id = newtp->write_seq ^ jiffies;
1369 
1370 	if (!dst) {
1371 		dst = inet_csk_route_child_sock(sk, newsk, req);
1372 		if (!dst)
1373 			goto put_and_exit;
1374 	} else {
1375 		/* syncookie case : see end of cookie_v4_check() */
1376 	}
1377 	sk_setup_caps(newsk, dst);
1378 
1379 	tcp_ca_openreq_child(newsk, dst);
1380 
1381 	tcp_sync_mss(newsk, dst_mtu(dst));
1382 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1383 
1384 	tcp_initialize_rcv_mss(newsk);
1385 
1386 #ifdef CONFIG_TCP_MD5SIG
1387 	/* Copy over the MD5 key from the original socket */
1388 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1389 				AF_INET);
1390 	if (key) {
1391 		/*
1392 		 * We're using one, so create a matching key
1393 		 * on the newsk structure. If we fail to get
1394 		 * memory, then we end up not copying the key
1395 		 * across. Shucks.
1396 		 */
1397 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1398 			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1399 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1400 	}
1401 #endif
1402 
1403 	if (__inet_inherit_port(sk, newsk) < 0)
1404 		goto put_and_exit;
1405 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 	if (likely(*own_req)) {
1407 		tcp_move_syn(newtp, req);
1408 		ireq->ireq_opt = NULL;
1409 	} else {
1410 		newinet->inet_opt = NULL;
1411 	}
1412 	return newsk;
1413 
1414 exit_overflow:
1415 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1416 exit_nonewsk:
1417 	dst_release(dst);
1418 exit:
1419 	tcp_listendrop(sk);
1420 	return NULL;
1421 put_and_exit:
1422 	newinet->inet_opt = NULL;
1423 	inet_csk_prepare_forced_close(newsk);
1424 	tcp_done(newsk);
1425 	goto exit;
1426 }
1427 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1428 
1429 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1430 {
1431 #ifdef CONFIG_SYN_COOKIES
1432 	const struct tcphdr *th = tcp_hdr(skb);
1433 
1434 	if (!th->syn)
1435 		sk = cookie_v4_check(sk, skb);
1436 #endif
1437 	return sk;
1438 }
1439 
1440 /* The socket must have its spinlock held when we get
1441  * here, unless it is a TCP_LISTEN socket.
1442  *
1443  * We have a potential double-lock case here, so even when
1444  * doing backlog processing we use the BH locking scheme.
1445  * This is because we cannot sleep with the original spinlock
1446  * held.
1447  */
1448 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1449 {
1450 	struct sock *rsk;
1451 
1452 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1453 		struct dst_entry *dst = sk->sk_rx_dst;
1454 
1455 		sock_rps_save_rxhash(sk, skb);
1456 		sk_mark_napi_id(sk, skb);
1457 		if (dst) {
1458 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1459 			    !dst->ops->check(dst, 0)) {
1460 				dst_release(dst);
1461 				sk->sk_rx_dst = NULL;
1462 			}
1463 		}
1464 		tcp_rcv_established(sk, skb, tcp_hdr(skb));
1465 		return 0;
1466 	}
1467 
1468 	if (tcp_checksum_complete(skb))
1469 		goto csum_err;
1470 
1471 	if (sk->sk_state == TCP_LISTEN) {
1472 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1473 
1474 		if (!nsk)
1475 			goto discard;
1476 		if (nsk != sk) {
1477 			if (tcp_child_process(sk, nsk, skb)) {
1478 				rsk = nsk;
1479 				goto reset;
1480 			}
1481 			return 0;
1482 		}
1483 	} else
1484 		sock_rps_save_rxhash(sk, skb);
1485 
1486 	if (tcp_rcv_state_process(sk, skb)) {
1487 		rsk = sk;
1488 		goto reset;
1489 	}
1490 	return 0;
1491 
1492 reset:
1493 	tcp_v4_send_reset(rsk, skb);
1494 discard:
1495 	kfree_skb(skb);
1496 	/* Be careful here. If this function gets more complicated and
1497 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1498 	 * might be destroyed here. This current version compiles correctly,
1499 	 * but you have been warned.
1500 	 */
1501 	return 0;
1502 
1503 csum_err:
1504 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1505 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1506 	goto discard;
1507 }
1508 EXPORT_SYMBOL(tcp_v4_do_rcv);
1509 
1510 int tcp_v4_early_demux(struct sk_buff *skb)
1511 {
1512 	const struct iphdr *iph;
1513 	const struct tcphdr *th;
1514 	struct sock *sk;
1515 
1516 	if (skb->pkt_type != PACKET_HOST)
1517 		return 0;
1518 
1519 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1520 		return 0;
1521 
1522 	iph = ip_hdr(skb);
1523 	th = tcp_hdr(skb);
1524 
1525 	if (th->doff < sizeof(struct tcphdr) / 4)
1526 		return 0;
1527 
1528 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1529 				       iph->saddr, th->source,
1530 				       iph->daddr, ntohs(th->dest),
1531 				       skb->skb_iif, inet_sdif(skb));
1532 	if (sk) {
1533 		skb->sk = sk;
1534 		skb->destructor = sock_edemux;
1535 		if (sk_fullsock(sk)) {
1536 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1537 
1538 			if (dst)
1539 				dst = dst_check(dst, 0);
1540 			if (dst &&
1541 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1542 				skb_dst_set_noref(skb, dst);
1543 		}
1544 	}
1545 	return 0;
1546 }
1547 
1548 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1549 {
1550 	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1551 
1552 	/* Only the socket owner can try to collapse/prune rx queues
1553 	 * to reduce memory overhead, so add a little headroom here.
1554 	 * Only a few socket backlogs are likely to be non-empty concurrently.
1555 	 */
1556 	limit += 64*1024;
1557 
1558 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1559 	 * we can fix skb->truesize to its real value to avoid future drops.
1560 	 * This is valid because skb is not yet charged to the socket.
1561 	 * It has been noticed pure SACK packets were sometimes dropped
1562 	 * (if cooked by drivers without copybreak feature).
1563 	 */
1564 	skb_condense(skb);
1565 
1566 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1567 		bh_unlock_sock(sk);
1568 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1569 		return true;
1570 	}
1571 	return false;
1572 }
1573 EXPORT_SYMBOL(tcp_add_backlog);
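
Since the threshold above is derived from the socket's own buffer sizes, receiver tuning such as SO_RCVBUF indirectly raises the backlog budget too. A rough, purely illustrative sketch of the resulting limit:

	#include <stdio.h>
	#include <sys/socket.h>

	/* Approximate the tcp_add_backlog() limit for a socket:
	 * sk_rcvbuf + sk_sndbuf plus the 64KB of headroom added above.
	 */
	static void print_backlog_budget(int fd)
	{
		int rcv = 0, snd = 0;
		socklen_t len = sizeof(rcv);

		getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
		len = sizeof(snd);
		getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, &len);
		printf("approx backlog limit: %d bytes\n",
		       rcv + snd + 64 * 1024);
	}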
1574 
1575 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1576 {
1577 	struct tcphdr *th = (struct tcphdr *)skb->data;
1578 	unsigned int eaten = skb->len;
1579 	int err;
1580 
1581 	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1582 	if (!err) {
1583 		eaten -= skb->len;
1584 		TCP_SKB_CB(skb)->end_seq -= eaten;
1585 	}
1586 	return err;
1587 }
1588 EXPORT_SYMBOL(tcp_filter);
1589 
1590 /*
1591  *	From tcp_input.c
1592  */
1593 
1594 int tcp_v4_rcv(struct sk_buff *skb)
1595 {
1596 	struct net *net = dev_net(skb->dev);
1597 	int sdif = inet_sdif(skb);
1598 	const struct iphdr *iph;
1599 	const struct tcphdr *th;
1600 	bool refcounted;
1601 	struct sock *sk;
1602 	int ret;
1603 
1604 	if (skb->pkt_type != PACKET_HOST)
1605 		goto discard_it;
1606 
1607 	/* Count it even if it's bad */
1608 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1609 
1610 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1611 		goto discard_it;
1612 
1613 	th = (const struct tcphdr *)skb->data;
1614 
1615 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1616 		goto bad_packet;
1617 	if (!pskb_may_pull(skb, th->doff * 4))
1618 		goto discard_it;
1619 
1620 	/* An explanation is required here, I think.
1621 	 * Packet length and doff are validated by header prediction,
1622 	 * provided the case of th->doff==0 is eliminated.
1623 	 * So, we defer the checks. */
1624 
1625 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1626 		goto csum_error;
1627 
1628 	th = (const struct tcphdr *)skb->data;
1629 	iph = ip_hdr(skb);
1630 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1631 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1632 	 */
1633 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1634 		sizeof(struct inet_skb_parm));
1635 	barrier();
1636 
1637 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1638 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1639 				    skb->len - th->doff * 4);
1640 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1641 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1642 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1643 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1644 	TCP_SKB_CB(skb)->sacked	 = 0;
1645 	TCP_SKB_CB(skb)->has_rxtstamp =
1646 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1647 
1648 lookup:
1649 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1650 			       th->dest, sdif, &refcounted);
1651 	if (!sk)
1652 		goto no_tcp_socket;
1653 
1654 process:
1655 	if (sk->sk_state == TCP_TIME_WAIT)
1656 		goto do_time_wait;
1657 
1658 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1659 		struct request_sock *req = inet_reqsk(sk);
1660 		struct sock *nsk;
1661 
1662 		sk = req->rsk_listener;
1663 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1664 			sk_drops_add(sk, skb);
1665 			reqsk_put(req);
1666 			goto discard_it;
1667 		}
1668 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1669 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1670 			goto lookup;
1671 		}
1672 		/* We own a reference on the listener, increase it again
1673 		 * as we might lose it too soon.
1674 		 */
1675 		sock_hold(sk);
1676 		refcounted = true;
1677 		nsk = NULL;
1678 		if (!tcp_filter(sk, skb))
1679 			nsk = tcp_check_req(sk, skb, req, false);
1680 		if (!nsk) {
1681 			reqsk_put(req);
1682 			goto discard_and_relse;
1683 		}
1684 		if (nsk == sk) {
1685 			reqsk_put(req);
1686 		} else if (tcp_child_process(sk, nsk, skb)) {
1687 			tcp_v4_send_reset(nsk, skb);
1688 			goto discard_and_relse;
1689 		} else {
1690 			sock_put(sk);
1691 			return 0;
1692 		}
1693 	}
1694 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1695 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1696 		goto discard_and_relse;
1697 	}
1698 
1699 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1700 		goto discard_and_relse;
1701 
1702 	if (tcp_v4_inbound_md5_hash(sk, skb))
1703 		goto discard_and_relse;
1704 
1705 	nf_reset(skb);
1706 
1707 	if (tcp_filter(sk, skb))
1708 		goto discard_and_relse;
1709 	th = (const struct tcphdr *)skb->data;
1710 	iph = ip_hdr(skb);
1711 
1712 	skb->dev = NULL;
1713 
1714 	if (sk->sk_state == TCP_LISTEN) {
1715 		ret = tcp_v4_do_rcv(sk, skb);
1716 		goto put_and_return;
1717 	}
1718 
1719 	sk_incoming_cpu_update(sk);
1720 
1721 	bh_lock_sock_nested(sk);
1722 	tcp_segs_in(tcp_sk(sk), skb);
1723 	ret = 0;
1724 	if (!sock_owned_by_user(sk)) {
1725 		ret = tcp_v4_do_rcv(sk, skb);
1726 	} else if (tcp_add_backlog(sk, skb)) {
1727 		goto discard_and_relse;
1728 	}
1729 	bh_unlock_sock(sk);
1730 
1731 put_and_return:
1732 	if (refcounted)
1733 		sock_put(sk);
1734 
1735 	return ret;
1736 
1737 no_tcp_socket:
1738 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1739 		goto discard_it;
1740 
1741 	if (tcp_checksum_complete(skb)) {
1742 csum_error:
1743 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1744 bad_packet:
1745 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1746 	} else {
1747 		tcp_v4_send_reset(NULL, skb);
1748 	}
1749 
1750 discard_it:
1751 	/* Discard frame. */
1752 	kfree_skb(skb);
1753 	return 0;
1754 
1755 discard_and_relse:
1756 	sk_drops_add(sk, skb);
1757 	if (refcounted)
1758 		sock_put(sk);
1759 	goto discard_it;
1760 
1761 do_time_wait:
1762 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1763 		inet_twsk_put(inet_twsk(sk));
1764 		goto discard_it;
1765 	}
1766 
1767 	if (tcp_checksum_complete(skb)) {
1768 		inet_twsk_put(inet_twsk(sk));
1769 		goto csum_error;
1770 	}
1771 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1772 	case TCP_TW_SYN: {
1773 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1774 							&tcp_hashinfo, skb,
1775 							__tcp_hdrlen(th),
1776 							iph->saddr, th->source,
1777 							iph->daddr, th->dest,
1778 							inet_iif(skb),
1779 							sdif);
1780 		if (sk2) {
1781 			inet_twsk_deschedule_put(inet_twsk(sk));
1782 			sk = sk2;
1783 			refcounted = false;
1784 			goto process;
1785 		}
1786 		/* Fall through to ACK */
1787 	}
1788 	case TCP_TW_ACK:
1789 		tcp_v4_timewait_ack(sk, skb);
1790 		break;
1791 	case TCP_TW_RST:
1792 		tcp_v4_send_reset(sk, skb);
1793 		inet_twsk_deschedule_put(inet_twsk(sk));
1794 		goto discard_it;
1795 	case TCP_TW_SUCCESS:;
1796 	}
1797 	goto discard_it;
1798 }
1799 
1800 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1801 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1802 	.twsk_unique	= tcp_twsk_unique,
1803 	.twsk_destructor= tcp_twsk_destructor,
1804 };
1805 
1806 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1807 {
1808 	struct dst_entry *dst = skb_dst(skb);
1809 
1810 	if (dst && dst_hold_safe(dst)) {
1811 		sk->sk_rx_dst = dst;
1812 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1813 	}
1814 }
1815 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1816 
1817 const struct inet_connection_sock_af_ops ipv4_specific = {
1818 	.queue_xmit	   = ip_queue_xmit,
1819 	.send_check	   = tcp_v4_send_check,
1820 	.rebuild_header	   = inet_sk_rebuild_header,
1821 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1822 	.conn_request	   = tcp_v4_conn_request,
1823 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1824 	.net_header_len	   = sizeof(struct iphdr),
1825 	.setsockopt	   = ip_setsockopt,
1826 	.getsockopt	   = ip_getsockopt,
1827 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1828 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1829 #ifdef CONFIG_COMPAT
1830 	.compat_setsockopt = compat_ip_setsockopt,
1831 	.compat_getsockopt = compat_ip_getsockopt,
1832 #endif
1833 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1834 };
1835 EXPORT_SYMBOL(ipv4_specific);
1836 
1837 #ifdef CONFIG_TCP_MD5SIG
1838 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1839 	.md5_lookup		= tcp_v4_md5_lookup,
1840 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1841 	.md5_parse		= tcp_v4_parse_md5_keys,
1842 };
1843 #endif
1844 
1845 /* NOTE: A lot of things are set to zero explicitly by the call to
1846  *       sk_alloc(), so they need not be done here.
1847  */
1848 static int tcp_v4_init_sock(struct sock *sk)
1849 {
1850 	struct inet_connection_sock *icsk = inet_csk(sk);
1851 
1852 	tcp_init_sock(sk);
1853 
1854 	icsk->icsk_af_ops = &ipv4_specific;
1855 
1856 #ifdef CONFIG_TCP_MD5SIG
1857 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1858 #endif
1859 
1860 	return 0;
1861 }
1862 
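/* Final per-socket cleanup: stop timers, drop congestion control and ULP
 * state, purge queued skbs, free MD5 keys and release the bound port.
 */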
1863 void tcp_v4_destroy_sock(struct sock *sk)
1864 {
1865 	struct tcp_sock *tp = tcp_sk(sk);
1866 
1867 	tcp_clear_xmit_timers(sk);
1868 
1869 	tcp_cleanup_congestion_control(sk);
1870 
1871 	tcp_cleanup_ulp(sk);
1872 
1873 	/* Clean up the write buffer. */
1874 	tcp_write_queue_purge(sk);
1875 
1876 	/* Check if we want to disable active TFO */
1877 	tcp_fastopen_active_disable_ofo_check(sk);
1878 
1879 	/* Cleans up our, hopefully empty, out_of_order_queue. */
1880 	skb_rbtree_purge(&tp->out_of_order_queue);
1881 
1882 #ifdef CONFIG_TCP_MD5SIG
1883 	/* Clean up the MD5 key list, if any */
1884 	if (tp->md5sig_info) {
1885 		tcp_clear_md5_list(sk);
1886 		kfree_rcu(tp->md5sig_info, rcu);
1887 		tp->md5sig_info = NULL;
1888 	}
1889 #endif
1890 
1891 	/* Clean up a referenced TCP bind bucket. */
1892 	if (inet_csk(sk)->icsk_bind_hash)
1893 		inet_put_port(sk);
1894 
1895 	BUG_ON(tp->fastopen_rsk);
1896 
1897 	/* If the socket was aborted during a connect operation */
1898 	tcp_free_fastopen_req(tp);
1899 	tcp_saved_syn_free(tp);
1900 
1901 	sk_sockets_allocated_dec(sk);
1902 }
1903 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1904 
1905 #ifdef CONFIG_PROC_FS
1906 /* Proc filesystem TCP sock list dumping. */
1907 
1908 /*
1909  * Get the next listener socket after cur.  If cur is NULL, get the first
1910  * socket starting from the bucket given in st->bucket; when st->bucket is
1911  * zero, the very first socket in the hash table is returned.
1912  */
1913 static void *listening_get_next(struct seq_file *seq, void *cur)
1914 {
1915 	struct tcp_iter_state *st = seq->private;
1916 	struct net *net = seq_file_net(seq);
1917 	struct inet_listen_hashbucket *ilb;
1918 	struct sock *sk = cur;
1919 
1920 	if (!sk) {
1921 get_head:
1922 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1923 		spin_lock(&ilb->lock);
1924 		sk = sk_head(&ilb->head);
1925 		st->offset = 0;
1926 		goto get_sk;
1927 	}
1928 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1929 	++st->num;
1930 	++st->offset;
1931 
1932 	sk = sk_next(sk);
1933 get_sk:
1934 	sk_for_each_from(sk) {
1935 		if (!net_eq(sock_net(sk), net))
1936 			continue;
1937 		if (sk->sk_family == st->family)
1938 			return sk;
1939 	}
1940 	spin_unlock(&ilb->lock);
1941 	st->offset = 0;
1942 	if (++st->bucket < INET_LHTABLE_SIZE)
1943 		goto get_head;
1944 	return NULL;
1945 }
1946 
1947 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1948 {
1949 	struct tcp_iter_state *st = seq->private;
1950 	void *rc;
1951 
1952 	st->bucket = 0;
1953 	st->offset = 0;
1954 	rc = listening_get_next(seq, NULL);
1955 
1956 	while (rc && *pos) {
1957 		rc = listening_get_next(seq, rc);
1958 		--*pos;
1959 	}
1960 	return rc;
1961 }
1962 
1963 static inline bool empty_bucket(const struct tcp_iter_state *st)
1964 {
1965 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1966 }
1967 
1968 /*
1969  * Get the first established socket, starting from the bucket given in st->bucket.
1970  * If st->bucket is zero, the very first socket in the hash is returned.
1971  */
1972 static void *established_get_first(struct seq_file *seq)
1973 {
1974 	struct tcp_iter_state *st = seq->private;
1975 	struct net *net = seq_file_net(seq);
1976 	void *rc = NULL;
1977 
1978 	st->offset = 0;
1979 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1980 		struct sock *sk;
1981 		struct hlist_nulls_node *node;
1982 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1983 
1984 		/* Lockless fast path for the common case of empty buckets */
1985 		if (empty_bucket(st))
1986 			continue;
1987 
1988 		spin_lock_bh(lock);
1989 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1990 			if (sk->sk_family != st->family ||
1991 			    !net_eq(sock_net(sk), net)) {
1992 				continue;
1993 			}
1994 			rc = sk;
1995 			goto out;
1996 		}
1997 		spin_unlock_bh(lock);
1998 	}
1999 out:
2000 	return rc;
2001 }
2002 
2003 static void *established_get_next(struct seq_file *seq, void *cur)
2004 {
2005 	struct sock *sk = cur;
2006 	struct hlist_nulls_node *node;
2007 	struct tcp_iter_state *st = seq->private;
2008 	struct net *net = seq_file_net(seq);
2009 
2010 	++st->num;
2011 	++st->offset;
2012 
2013 	sk = sk_nulls_next(sk);
2014 
2015 	sk_nulls_for_each_from(sk, node) {
2016 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2017 			return sk;
2018 	}
2019 
2020 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2021 	++st->bucket;
2022 	return established_get_first(seq);
2023 }
2024 
2025 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2026 {
2027 	struct tcp_iter_state *st = seq->private;
2028 	void *rc;
2029 
2030 	st->bucket = 0;
2031 	rc = established_get_first(seq);
2032 
2033 	while (rc && pos) {
2034 		rc = established_get_next(seq, rc);
2035 		--pos;
2036 	}
2037 	return rc;
2038 }
2039 
2040 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2041 {
2042 	void *rc;
2043 	struct tcp_iter_state *st = seq->private;
2044 
2045 	st->state = TCP_SEQ_STATE_LISTENING;
2046 	rc	  = listening_get_idx(seq, &pos);
2047 
2048 	if (!rc) {
2049 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2050 		rc	  = established_get_idx(seq, pos);
2051 	}
2052 
2053 	return rc;
2054 }
2055 
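/* Try to resume iteration at the position saved in the iterator state
 * (st->state, st->bucket, st->offset) rather than walking the tables
 * from the start.  Returns NULL if the saved position no longer exists,
 * in which case tcp_seq_start() falls back to a full re-scan.
 */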
2056 static void *tcp_seek_last_pos(struct seq_file *seq)
2057 {
2058 	struct tcp_iter_state *st = seq->private;
2059 	int offset = st->offset;
2060 	int orig_num = st->num;
2061 	void *rc = NULL;
2062 
2063 	switch (st->state) {
2064 	case TCP_SEQ_STATE_LISTENING:
2065 		if (st->bucket >= INET_LHTABLE_SIZE)
2066 			break;
2067 		st->state = TCP_SEQ_STATE_LISTENING;
2068 		rc = listening_get_next(seq, NULL);
2069 		while (offset-- && rc)
2070 			rc = listening_get_next(seq, rc);
2071 		if (rc)
2072 			break;
2073 		st->bucket = 0;
2074 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2075 		/* Fallthrough */
2076 	case TCP_SEQ_STATE_ESTABLISHED:
2077 		if (st->bucket > tcp_hashinfo.ehash_mask)
2078 			break;
2079 		rc = established_get_first(seq);
2080 		while (offset-- && rc)
2081 			rc = established_get_next(seq, rc);
2082 	}
2083 
2084 	st->num = orig_num;
2085 
2086 	return rc;
2087 }
2088 
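/* seq_file ->start(): return SEQ_START_TOKEN for the header line, then
 * the entry at *pos.  st->last_pos makes sequential reads cheap by
 * letting tcp_seek_last_pos() continue where the previous read ended.
 */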
2089 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2090 {
2091 	struct tcp_iter_state *st = seq->private;
2092 	void *rc;
2093 
2094 	if (*pos && *pos == st->last_pos) {
2095 		rc = tcp_seek_last_pos(seq);
2096 		if (rc)
2097 			goto out;
2098 	}
2099 
2100 	st->state = TCP_SEQ_STATE_LISTENING;
2101 	st->num = 0;
2102 	st->bucket = 0;
2103 	st->offset = 0;
2104 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2105 
2106 out:
2107 	st->last_pos = *pos;
2108 	return rc;
2109 }
2110 
2111 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2112 {
2113 	struct tcp_iter_state *st = seq->private;
2114 	void *rc = NULL;
2115 
2116 	if (v == SEQ_START_TOKEN) {
2117 		rc = tcp_get_idx(seq, 0);
2118 		goto out;
2119 	}
2120 
2121 	switch (st->state) {
2122 	case TCP_SEQ_STATE_LISTENING:
2123 		rc = listening_get_next(seq, v);
2124 		if (!rc) {
2125 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2126 			st->bucket = 0;
2127 			st->offset = 0;
2128 			rc	  = established_get_first(seq);
2129 		}
2130 		break;
2131 	case TCP_SEQ_STATE_ESTABLISHED:
2132 		rc = established_get_next(seq, v);
2133 		break;
2134 	}
2135 out:
2136 	++*pos;
2137 	st->last_pos = *pos;
2138 	return rc;
2139 }
2140 
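/* seq_file ->stop(): drop whichever bucket lock the iterators above
 * left held, so no lock is held between two read() calls.
 */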
2141 static void tcp_seq_stop(struct seq_file *seq, void *v)
2142 {
2143 	struct tcp_iter_state *st = seq->private;
2144 
2145 	switch (st->state) {
2146 	case TCP_SEQ_STATE_LISTENING:
2147 		if (v != SEQ_START_TOKEN)
2148 			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2149 		break;
2150 	case TCP_SEQ_STATE_ESTABLISHED:
2151 		if (v)
2152 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2153 		break;
2154 	}
2155 }
2156 
2157 int tcp_seq_open(struct inode *inode, struct file *file)
2158 {
2159 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2160 	struct tcp_iter_state *s;
2161 	int err;
2162 
2163 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2164 			  sizeof(struct tcp_iter_state));
2165 	if (err < 0)
2166 		return err;
2167 
2168 	s = ((struct seq_file *)file->private_data)->private;
2169 	s->family		= afinfo->family;
2170 	s->last_pos		= 0;
2171 	return 0;
2172 }
2173 EXPORT_SYMBOL(tcp_seq_open);
2174 
2175 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2176 {
2177 	int rc = 0;
2178 	struct proc_dir_entry *p;
2179 
2180 	afinfo->seq_ops.start		= tcp_seq_start;
2181 	afinfo->seq_ops.next		= tcp_seq_next;
2182 	afinfo->seq_ops.stop		= tcp_seq_stop;
2183 
2184 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2185 			     afinfo->seq_fops, afinfo);
2186 	if (!p)
2187 		rc = -ENOMEM;
2188 	return rc;
2189 }
2190 EXPORT_SYMBOL(tcp_proc_register);
2191 
2192 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2193 {
2194 	remove_proc_entry(afinfo->name, net->proc_net);
2195 }
2196 EXPORT_SYMBOL(tcp_proc_unregister);
2197 
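/* The three helpers below each format one line of /proc/net/tcp: one
 * for SYN_RECV request sockets, one for full sockets and one for
 * TIME_WAIT sockets.  tcp4_seq_show() picks the right one by state.
 */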
2198 static void get_openreq4(const struct request_sock *req,
2199 			 struct seq_file *f, int i)
2200 {
2201 	const struct inet_request_sock *ireq = inet_rsk(req);
2202 	long delta = req->rsk_timer.expires - jiffies;
2203 
2204 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2205 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2206 		i,
2207 		ireq->ir_loc_addr,
2208 		ireq->ir_num,
2209 		ireq->ir_rmt_addr,
2210 		ntohs(ireq->ir_rmt_port),
2211 		TCP_SYN_RECV,
2212 		0, 0, /* could print option size, but that is af dependent. */
2213 		1,    /* timers active (only the expire timer) */
2214 		jiffies_delta_to_clock_t(delta),
2215 		req->num_timeout,
2216 		from_kuid_munged(seq_user_ns(f),
2217 				 sock_i_uid(req->rsk_listener)),
2218 		0,  /* non-standard timer */
2219 		0,  /* open_requests have no inode */
2220 		0,
2221 		req);
2222 }
2223 
2224 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2225 {
2226 	int timer_active;
2227 	unsigned long timer_expires;
2228 	const struct tcp_sock *tp = tcp_sk(sk);
2229 	const struct inet_connection_sock *icsk = inet_csk(sk);
2230 	const struct inet_sock *inet = inet_sk(sk);
2231 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2232 	__be32 dest = inet->inet_daddr;
2233 	__be32 src = inet->inet_rcv_saddr;
2234 	__u16 destp = ntohs(inet->inet_dport);
2235 	__u16 srcp = ntohs(inet->inet_sport);
2236 	int rx_queue;
2237 	int state;
2238 
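	/* Encode the pending timer for the "tr" column of /proc/net/tcp:
	 * 1 retransmit/loss probe, 4 zero window probe, 2 sk_timer
	 * (keepalive), 0 none.  TIME_WAIT sockets report 3 instead (see
	 * get_timewait4_sock()).
	 */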
2239 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2240 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2241 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2242 		timer_active	= 1;
2243 		timer_expires	= icsk->icsk_timeout;
2244 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2245 		timer_active	= 4;
2246 		timer_expires	= icsk->icsk_timeout;
2247 	} else if (timer_pending(&sk->sk_timer)) {
2248 		timer_active	= 2;
2249 		timer_expires	= sk->sk_timer.expires;
2250 	} else {
2251 		timer_active	= 0;
2252 		timer_expires	= jiffies;
2253 	}
2254 
2255 	state = sk_state_load(sk);
2256 	if (state == TCP_LISTEN)
2257 		rx_queue = sk->sk_ack_backlog;
2258 	else
2259 		/* Because we don't lock the socket,
2260 		 * we might find a transient negative value.
2261 		 */
2262 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2263 
2264 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2265 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2266 		i, src, srcp, dest, destp, state,
2267 		tp->write_seq - tp->snd_una,
2268 		rx_queue,
2269 		timer_active,
2270 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2271 		icsk->icsk_retransmits,
2272 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2273 		icsk->icsk_probes_out,
2274 		sock_i_ino(sk),
2275 		refcount_read(&sk->sk_refcnt), sk,
2276 		jiffies_to_clock_t(icsk->icsk_rto),
2277 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2278 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2279 		tp->snd_cwnd,
2280 		state == TCP_LISTEN ?
2281 		    fastopenq->max_qlen :
2282 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2283 }
2284 
2285 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2286 			       struct seq_file *f, int i)
2287 {
2288 	long delta = tw->tw_timer.expires - jiffies;
2289 	__be32 dest, src;
2290 	__u16 destp, srcp;
2291 
2292 	dest  = tw->tw_daddr;
2293 	src   = tw->tw_rcv_saddr;
2294 	destp = ntohs(tw->tw_dport);
2295 	srcp  = ntohs(tw->tw_sport);
2296 
2297 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2298 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2299 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2300 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2301 		refcount_read(&tw->tw_refcnt), tw);
2302 }
2303 
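/* Every /proc/net/tcp line is padded to TMPSZ - 1 characters plus the
 * trailing newline (via seq_setwidth()/seq_pad() below), preserving the
 * historical fixed-width record format.
 */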
2304 #define TMPSZ 150
2305 
2306 static int tcp4_seq_show(struct seq_file *seq, void *v)
2307 {
2308 	struct tcp_iter_state *st;
2309 	struct sock *sk = v;
2310 
2311 	seq_setwidth(seq, TMPSZ - 1);
2312 	if (v == SEQ_START_TOKEN) {
2313 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2314 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2315 			   "inode");
2316 		goto out;
2317 	}
2318 	st = seq->private;
2319 
2320 	if (sk->sk_state == TCP_TIME_WAIT)
2321 		get_timewait4_sock(v, seq, st->num);
2322 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2323 		get_openreq4(v, seq, st->num);
2324 	else
2325 		get_tcp4_sock(v, seq, st->num);
2326 out:
2327 	seq_pad(seq, '\n');
2328 	return 0;
2329 }
2330 
2331 static const struct file_operations tcp_afinfo_seq_fops = {
2332 	.owner   = THIS_MODULE,
2333 	.open    = tcp_seq_open,
2334 	.read    = seq_read,
2335 	.llseek  = seq_lseek,
2336 	.release = seq_release_net
2337 };
2338 
2339 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2340 	.name		= "tcp",
2341 	.family		= AF_INET,
2342 	.seq_fops	= &tcp_afinfo_seq_fops,
2343 	.seq_ops	= {
2344 		.show		= tcp4_seq_show,
2345 	},
2346 };
2347 
2348 static int __net_init tcp4_proc_init_net(struct net *net)
2349 {
2350 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2351 }
2352 
2353 static void __net_exit tcp4_proc_exit_net(struct net *net)
2354 {
2355 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2356 }
2357 
2358 static struct pernet_operations tcp4_net_ops = {
2359 	.init = tcp4_proc_init_net,
2360 	.exit = tcp4_proc_exit_net,
2361 };
2362 
2363 int __init tcp4_proc_init(void)
2364 {
2365 	return register_pernet_subsys(&tcp4_net_ops);
2366 }
2367 
2368 void tcp4_proc_exit(void)
2369 {
2370 	unregister_pernet_subsys(&tcp4_net_ops);
2371 }
2372 #endif /* CONFIG_PROC_FS */
2373 
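/* The proto operations that plug IPv4 TCP into the generic socket layer;
 * af_inet.c registers this as the SOCK_STREAM protocol.
 */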
2374 struct proto tcp_prot = {
2375 	.name			= "TCP",
2376 	.owner			= THIS_MODULE,
2377 	.close			= tcp_close,
2378 	.connect		= tcp_v4_connect,
2379 	.disconnect		= tcp_disconnect,
2380 	.accept			= inet_csk_accept,
2381 	.ioctl			= tcp_ioctl,
2382 	.init			= tcp_v4_init_sock,
2383 	.destroy		= tcp_v4_destroy_sock,
2384 	.shutdown		= tcp_shutdown,
2385 	.setsockopt		= tcp_setsockopt,
2386 	.getsockopt		= tcp_getsockopt,
2387 	.keepalive		= tcp_set_keepalive,
2388 	.recvmsg		= tcp_recvmsg,
2389 	.sendmsg		= tcp_sendmsg,
2390 	.sendpage		= tcp_sendpage,
2391 	.backlog_rcv		= tcp_v4_do_rcv,
2392 	.release_cb		= tcp_release_cb,
2393 	.hash			= inet_hash,
2394 	.unhash			= inet_unhash,
2395 	.get_port		= inet_csk_get_port,
2396 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2397 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2398 	.stream_memory_free	= tcp_stream_memory_free,
2399 	.sockets_allocated	= &tcp_sockets_allocated,
2400 	.orphan_count		= &tcp_orphan_count,
2401 	.memory_allocated	= &tcp_memory_allocated,
2402 	.memory_pressure	= &tcp_memory_pressure,
2403 	.sysctl_mem		= sysctl_tcp_mem,
2404 	.sysctl_wmem		= sysctl_tcp_wmem,
2405 	.sysctl_rmem		= sysctl_tcp_rmem,
2406 	.max_header		= MAX_TCP_HEADER,
2407 	.obj_size		= sizeof(struct tcp_sock),
2408 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2409 	.twsk_prot		= &tcp_timewait_sock_ops,
2410 	.rsk_prot		= &tcp_request_sock_ops,
2411 	.h.hashinfo		= &tcp_hashinfo,
2412 	.no_autobind		= true,
2413 #ifdef CONFIG_COMPAT
2414 	.compat_setsockopt	= compat_tcp_setsockopt,
2415 	.compat_getsockopt	= compat_tcp_getsockopt,
2416 #endif
2417 	.diag_destroy		= tcp_abort,
2418 };
2419 EXPORT_SYMBOL(tcp_prot);
2420 
2421 static void __net_exit tcp_sk_exit(struct net *net)
2422 {
2423 	int cpu;
2424 
2425 	for_each_possible_cpu(cpu)
2426 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2427 	free_percpu(net->ipv4.tcp_sk);
2428 }
2429 
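/* Per-namespace init: create one control socket per possible CPU (used
 * by tcp_v4_send_reset()/tcp_v4_send_ack() to transmit packets that are
 * not tied to a full socket) and set the namespace's TCP sysctl defaults.
 */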
2430 static int __net_init tcp_sk_init(struct net *net)
2431 {
2432 	int res, cpu, cnt;
2433 
2434 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2435 	if (!net->ipv4.tcp_sk)
2436 		return -ENOMEM;
2437 
2438 	for_each_possible_cpu(cpu) {
2439 		struct sock *sk;
2440 
2441 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2442 					   IPPROTO_TCP, net);
2443 		if (res)
2444 			goto fail;
2445 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2446 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2447 	}
2448 
2449 	net->ipv4.sysctl_tcp_ecn = 2;
2450 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2451 
2452 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2453 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2454 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2455 
2456 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2457 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2458 	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2459 
2460 	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2461 	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2462 	net->ipv4.sysctl_tcp_syncookies = 1;
2463 	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2464 	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2465 	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2466 	net->ipv4.sysctl_tcp_orphan_retries = 0;
2467 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2468 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2469 	net->ipv4.sysctl_tcp_tw_reuse = 0;
2470 
2471 	cnt = tcp_hashinfo.ehash_mask + 1;
2472 	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2473 	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2474 
2475 	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2476 	net->ipv4.sysctl_tcp_sack = 1;
2477 	net->ipv4.sysctl_tcp_window_scaling = 1;
2478 	net->ipv4.sysctl_tcp_timestamps = 1;
2479 
2480 	return 0;
2481 fail:
2482 	tcp_sk_exit(net);
2483 
2484 	return res;
2485 }
2486 
2487 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2488 {
2489 	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2490 }
2491 
2492 static struct pernet_operations __net_initdata tcp_sk_ops = {
2493 	.init	   = tcp_sk_init,
2494 	.exit	   = tcp_sk_exit,
2495 	.exit_batch = tcp_sk_exit_batch,
2496 };
2497 
2498 void __init tcp_v4_init(void)
2499 {
2500 	if (register_pernet_subsys(&tcp_sk_ops))
2501 		panic("Failed to create the TCP control socket.\n");
2502 }
2503