xref: /linux/net/ipv4/tcp_ipv4.c (revision 7056741fd9fc14a65608549a4657cf5178f05f63)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97 
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100 
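/* Initial sequence numbers are derived from the connection 4-tuple
 * (addresses and ports) mixed with a secret and a clock component in
 * secure_tcp_sequence_number(), so off-path attackers cannot easily
 * predict them.
 */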
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 					  ip_hdr(skb)->saddr,
105 					  tcp_hdr(skb)->dest,
106 					  tcp_hdr(skb)->source);
107 }
108 
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 	struct tcp_sock *tp = tcp_sk(sk);
113 
114 	/* With PAWS, it is safe from the viewpoint
115 	   of data integrity. Even without PAWS it is safe provided sequence
116 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117 
118 	   Actually, the idea is close to VJ's, except that the timestamp
119 	   cache is held not per host but per port pair, and the TW bucket
120 	   is used as the state holder.
121 
122 	   If the TW bucket has already been destroyed, we fall back to VJ's
123 	   scheme and use the initial timestamp retrieved from the peer table.
124 	 */
125 	if (tcptw->tw_ts_recent_stamp &&
126 	    (twp == NULL || (sysctl_tcp_tw_reuse &&
127 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
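		/* Step write_seq well past the TIME-WAIT socket's snd_nxt so
		 * the new incarnation's sequence space cannot overlap the old
		 * one's.
		 */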
128 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 		if (tp->write_seq == 0)
130 			tp->write_seq = 1;
131 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 		sock_hold(sktw);
134 		return 1;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 
141 static int tcp_repair_connect(struct sock *sk)
142 {
143 	tcp_connect_init(sk);
144 	tcp_finish_connect(sk, NULL);
145 
146 	return 0;
147 }
148 
149 /* This will initiate an outgoing connection. */
150 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
151 {
152 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 	struct inet_sock *inet = inet_sk(sk);
154 	struct tcp_sock *tp = tcp_sk(sk);
155 	__be16 orig_sport, orig_dport;
156 	__be32 daddr, nexthop;
157 	struct flowi4 *fl4;
158 	struct rtable *rt;
159 	int err;
160 	struct ip_options_rcu *inet_opt;
161 
162 	if (addr_len < sizeof(struct sockaddr_in))
163 		return -EINVAL;
164 
165 	if (usin->sin_family != AF_INET)
166 		return -EAFNOSUPPORT;
167 
168 	nexthop = daddr = usin->sin_addr.s_addr;
169 	inet_opt = rcu_dereference_protected(inet->inet_opt,
170 					     sock_owned_by_user(sk));
171 	if (inet_opt && inet_opt->opt.srr) {
172 		if (!daddr)
173 			return -EINVAL;
174 		nexthop = inet_opt->opt.faddr;
175 	}
176 
177 	orig_sport = inet->inet_sport;
178 	orig_dport = usin->sin_port;
179 	fl4 = &inet->cork.fl.u.ip4;
180 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
182 			      IPPROTO_TCP,
183 			      orig_sport, orig_dport, sk, true);
184 	if (IS_ERR(rt)) {
185 		err = PTR_ERR(rt);
186 		if (err == -ENETUNREACH)
187 			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
188 		return err;
189 	}
190 
191 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
192 		ip_rt_put(rt);
193 		return -ENETUNREACH;
194 	}
195 
196 	if (!inet_opt || !inet_opt->opt.srr)
197 		daddr = fl4->daddr;
198 
199 	if (!inet->inet_saddr)
200 		inet->inet_saddr = fl4->saddr;
201 	inet->inet_rcv_saddr = inet->inet_saddr;
202 
203 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 		/* Reset inherited state */
205 		tp->rx_opt.ts_recent	   = 0;
206 		tp->rx_opt.ts_recent_stamp = 0;
207 		if (likely(!tp->repair))
208 			tp->write_seq	   = 0;
209 	}
210 
211 	if (tcp_death_row.sysctl_tw_recycle &&
212 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 		tcp_fetch_timewait_stamp(sk, &rt->dst);
214 
215 	inet->inet_dport = usin->sin_port;
216 	inet->inet_daddr = daddr;
217 
218 	inet_csk(sk)->icsk_ext_hdr_len = 0;
219 	if (inet_opt)
220 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
221 
222 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
223 
224 	/* Socket identity is still unknown (sport may be zero).
225 	 * However, we set the state to SYN-SENT and, without releasing the
226 	 * socket lock, select a source port, enter ourselves into the hash
227 	 * tables and complete initialization afterwards.
228 	 */
229 	tcp_set_state(sk, TCP_SYN_SENT);
230 	err = inet_hash_connect(&tcp_death_row, sk);
231 	if (err)
232 		goto failure;
233 
234 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
235 			       inet->inet_sport, inet->inet_dport, sk);
236 	if (IS_ERR(rt)) {
237 		err = PTR_ERR(rt);
238 		rt = NULL;
239 		goto failure;
240 	}
241 	/* OK, now commit destination to socket.  */
242 	sk->sk_gso_type = SKB_GSO_TCPV4;
243 	sk_setup_caps(sk, &rt->dst);
244 
245 	if (!tp->write_seq && likely(!tp->repair))
246 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
247 							   inet->inet_daddr,
248 							   inet->inet_sport,
249 							   usin->sin_port);
250 
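	/* Seed the IP identification counter for this connection; mixing in
	 * the initial sequence number and jiffies keeps successive
	 * connections from starting at the same value.
	 */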
251 	inet->inet_id = tp->write_seq ^ jiffies;
252 
253 	if (likely(!tp->repair))
254 		err = tcp_connect(sk);
255 	else
256 		err = tcp_repair_connect(sk);
257 
258 	rt = NULL;
259 	if (err)
260 		goto failure;
261 
262 	return 0;
263 
264 failure:
265 	/*
266 	 * This unhashes the socket and releases the local port,
267 	 * if necessary.
268 	 */
269 	tcp_set_state(sk, TCP_CLOSE);
270 	ip_rt_put(rt);
271 	sk->sk_route_caps = 0;
272 	inet->inet_dport = 0;
273 	return err;
274 }
275 EXPORT_SYMBOL(tcp_v4_connect);
276 
277 /*
278  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
279  * It can be called through tcp_release_cb() if socket was owned by user
280  * at the time tcp_v4_err() was called to handle ICMP message.
281  */
282 static void tcp_v4_mtu_reduced(struct sock *sk)
283 {
284 	struct dst_entry *dst;
285 	struct inet_sock *inet = inet_sk(sk);
286 	u32 mtu = tcp_sk(sk)->mtu_info;
287 
288 	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
289 	 * sent out by Linux are always < 576 bytes so they should go through
290 	 * unfragmented).
291 	 */
292 	if (sk->sk_state == TCP_LISTEN)
293 		return;
294 
295 	dst = inet_csk_update_pmtu(sk, mtu);
296 	if (!dst)
297 		return;
298 
299 	/* Something is about to go wrong... Remember the soft error
300 	 * in case this connection is not able to recover.
301 	 */
302 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
303 		sk->sk_err_soft = EMSGSIZE;
304 
305 	mtu = dst_mtu(dst);
306 
307 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
308 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
309 		tcp_sync_mss(sk, mtu);
310 
311 		/* Resend the TCP packet because it's
312 		 * clear that the old packet has been
313 		 * dropped. This is the new "fast" path mtu
314 		 * discovery.
315 		 */
316 		tcp_simple_retransmit(sk);
317 	} /* else let the usual retransmit timer handle it */
318 }
319 
320 static void do_redirect(struct sk_buff *skb, struct sock *sk)
321 {
322 	struct dst_entry *dst = __sk_dst_check(sk, 0);
323 
324 	if (dst)
325 		dst->ops->redirect(dst, sk, skb);
326 }
327 
328 /*
329  * This routine is called by the ICMP module when it gets some
330  * sort of error condition.  If err < 0 then the socket should
331  * be closed and the error returned to the user.  If err > 0
332  * it's just the icmp type << 8 | icmp code.  After adjustment
333  * header points to the first 8 bytes of the tcp header.  We need
334  * to find the appropriate port.
335  *
336  * The locking strategy used here is very "optimistic". When
337  * someone else accesses the socket the ICMP is just dropped
338  * and for some paths there is no check at all.
339  * A more general error queue to queue errors for later handling
340  * is probably better.
341  *
342  */
343 
344 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
345 {
346 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
347 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
348 	struct inet_connection_sock *icsk;
349 	struct tcp_sock *tp;
350 	struct inet_sock *inet;
351 	const int type = icmp_hdr(icmp_skb)->type;
352 	const int code = icmp_hdr(icmp_skb)->code;
353 	struct sock *sk;
354 	struct sk_buff *skb;
355 	struct request_sock *req;
356 	__u32 seq;
357 	__u32 remaining;
358 	int err;
359 	struct net *net = dev_net(icmp_skb->dev);
360 
361 	if (icmp_skb->len < (iph->ihl << 2) + 8) {
362 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
363 		return;
364 	}
365 
366 	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
367 			iph->saddr, th->source, inet_iif(icmp_skb));
368 	if (!sk) {
369 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
370 		return;
371 	}
372 	if (sk->sk_state == TCP_TIME_WAIT) {
373 		inet_twsk_put(inet_twsk(sk));
374 		return;
375 	}
376 
377 	bh_lock_sock(sk);
378 	/* If too many ICMPs get dropped on busy
379 	 * servers this needs to be solved differently.
380 	 * We do take care of the PMTU discovery (RFC 1191) special case:
381 	 * we can receive locally generated ICMP messages while the socket is held.
382 	 */
383 	if (sock_owned_by_user(sk) &&
384 	    type != ICMP_DEST_UNREACH &&
385 	    code != ICMP_FRAG_NEEDED)
386 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
387 
388 	if (sk->sk_state == TCP_CLOSE)
389 		goto out;
390 
391 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
392 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
393 		goto out;
394 	}
395 
396 	icsk = inet_csk(sk);
397 	tp = tcp_sk(sk);
398 	req = tp->fastopen_rsk;
399 	seq = ntohl(th->seq);
400 	if (sk->sk_state != TCP_LISTEN &&
401 	    !between(seq, tp->snd_una, tp->snd_nxt) &&
402 	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
403 		/* For a Fast Open socket, allow seq to be snt_isn. */
404 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
405 		goto out;
406 	}
407 
408 	switch (type) {
409 	case ICMP_REDIRECT:
410 		do_redirect(icmp_skb, sk);
411 		goto out;
412 	case ICMP_SOURCE_QUENCH:
413 		/* Just silently ignore these. */
414 		goto out;
415 	case ICMP_PARAMETERPROB:
416 		err = EPROTO;
417 		break;
418 	case ICMP_DEST_UNREACH:
419 		if (code > NR_ICMP_UNREACH)
420 			goto out;
421 
422 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
423 			tp->mtu_info = info;
424 			if (!sock_owned_by_user(sk)) {
425 				tcp_v4_mtu_reduced(sk);
426 			} else {
427 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
428 					sock_hold(sk);
429 			}
430 			goto out;
431 		}
432 
433 		err = icmp_err_convert[code].errno;
434 		/* check if icmp_skb allows revert of backoff
435 		 * (see draft-zimmermann-tcp-lcd) */
436 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
437 			break;
438 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
439 		    !icsk->icsk_backoff)
440 			break;
441 
442 		/* XXX (TFO) - revisit the following logic for TFO */
443 
444 		if (sock_owned_by_user(sk))
445 			break;
446 
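		/* Revert one step of exponential backoff, recompute the RTO
		 * and re-arm the retransmit timer with whatever time remains;
		 * if it has already run out, retransmit immediately.
		 */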
447 		icsk->icsk_backoff--;
448 		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
449 			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
450 		tcp_bound_rto(sk);
451 
452 		skb = tcp_write_queue_head(sk);
453 		BUG_ON(!skb);
454 
455 		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
456 				tcp_time_stamp - TCP_SKB_CB(skb)->when);
457 
458 		if (remaining) {
459 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
460 						  remaining, TCP_RTO_MAX);
461 		} else {
462 			/* RTO revert clocked out retransmission.
463 			 * Will retransmit now */
464 			tcp_retransmit_timer(sk);
465 		}
466 
467 		break;
468 	case ICMP_TIME_EXCEEDED:
469 		err = EHOSTUNREACH;
470 		break;
471 	default:
472 		goto out;
473 	}
474 
475 	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
476 	 * than following the TCP_SYN_RECV case and closing the socket,
477 	 * we ignore the ICMP error and keep trying like a fully established
478 	 * socket. Is this the right thing to do?
479 	 */
480 	if (req && req->sk == NULL)
481 		goto out;
482 
483 	switch (sk->sk_state) {
484 		struct request_sock *req, **prev;
485 	case TCP_LISTEN:
486 		if (sock_owned_by_user(sk))
487 			goto out;
488 
489 		req = inet_csk_search_req(sk, &prev, th->dest,
490 					  iph->daddr, iph->saddr);
491 		if (!req)
492 			goto out;
493 
494 		/* ICMPs are not backlogged, hence we cannot get
495 		   an established socket here.
496 		 */
497 		WARN_ON(req->sk);
498 
499 		if (seq != tcp_rsk(req)->snt_isn) {
500 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
501 			goto out;
502 		}
503 
504 		/*
505 		 * Still in SYN_RECV, just remove it silently.
506 		 * There is no good way to pass the error to the newly
507 		 * created socket, and POSIX does not want network
508 		 * errors returned from accept().
509 		 */
510 		inet_csk_reqsk_queue_drop(sk, req, prev);
511 		goto out;
512 
513 	case TCP_SYN_SENT:
514 	case TCP_SYN_RECV:  /* Normally cannot happen.
515 			       It can, e.g. if SYNs crossed
516 			       or with Fast Open.
517 			     */
518 		if (!sock_owned_by_user(sk)) {
519 			sk->sk_err = err;
520 
521 			sk->sk_error_report(sk);
522 
523 			tcp_done(sk);
524 		} else {
525 			sk->sk_err_soft = err;
526 		}
527 		goto out;
528 	}
529 
530 	/* If we've already connected we will keep trying
531 	 * until we time out, or the user gives up.
532 	 *
533 	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
534 	 * to be considered hard errors (well, FRAG_FAILED too,
535 	 * but it is obsoleted by PMTU discovery).
536 	 *
537 	 * Note that on the modern internet, where routing is unreliable
538 	 * and broken firewalls sit in every dark corner sending random
539 	 * errors ordered by their masters, even these two messages have
540 	 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
541 	 *
542 	 * Now we are in compliance with RFCs.
543 	 *							--ANK (980905)
544 	 */
545 
546 	inet = inet_sk(sk);
547 	if (!sock_owned_by_user(sk) && inet->recverr) {
548 		sk->sk_err = err;
549 		sk->sk_error_report(sk);
550 	} else	{ /* Only an error on timeout */
551 		sk->sk_err_soft = err;
552 	}
553 
554 out:
555 	bh_unlock_sock(sk);
556 	sock_put(sk);
557 }
558 
559 static void __tcp_v4_send_check(struct sk_buff *skb,
560 				__be32 saddr, __be32 daddr)
561 {
562 	struct tcphdr *th = tcp_hdr(skb);
563 
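	/* For CHECKSUM_PARTIAL only the pseudo-header sum is stored in
	 * th->check; csum_start/csum_offset tell the device (or the software
	 * fallback) where to complete it. Otherwise compute the full
	 * checksum here.
	 */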
564 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
565 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
566 		skb->csum_start = skb_transport_header(skb) - skb->head;
567 		skb->csum_offset = offsetof(struct tcphdr, check);
568 	} else {
569 		th->check = tcp_v4_check(skb->len, saddr, daddr,
570 					 csum_partial(th,
571 						      th->doff << 2,
572 						      skb->csum));
573 	}
574 }
575 
576 /* This routine computes an IPv4 TCP checksum. */
577 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
578 {
579 	const struct inet_sock *inet = inet_sk(sk);
580 
581 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
582 }
583 EXPORT_SYMBOL(tcp_v4_send_check);
584 
585 int tcp_v4_gso_send_check(struct sk_buff *skb)
586 {
587 	const struct iphdr *iph;
588 	struct tcphdr *th;
589 
590 	if (!pskb_may_pull(skb, sizeof(*th)))
591 		return -EINVAL;
592 
593 	iph = ip_hdr(skb);
594 	th = tcp_hdr(skb);
595 
596 	th->check = 0;
597 	skb->ip_summed = CHECKSUM_PARTIAL;
598 	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
599 	return 0;
600 }
601 
602 /*
603  *	This routine will send an RST to the other TCP.
604  *
605  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
606  *		      for the reset?
607  *	Answer: if a packet caused an RST, it is not for a socket
608  *		existing in our system; if it is matched to a socket,
609  *		it is just a duplicate segment or a bug in the other side's TCP.
610  *		So we build the reply based only on parameters that
611  *		arrived with the segment.
612  *	Exception: precedence violation. We do not implement it in any case.
613  */
614 
615 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
616 {
617 	const struct tcphdr *th = tcp_hdr(skb);
618 	struct {
619 		struct tcphdr th;
620 #ifdef CONFIG_TCP_MD5SIG
621 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
622 #endif
623 	} rep;
624 	struct ip_reply_arg arg;
625 #ifdef CONFIG_TCP_MD5SIG
626 	struct tcp_md5sig_key *key;
627 	const __u8 *hash_location = NULL;
628 	unsigned char newhash[16];
629 	int genhash;
630 	struct sock *sk1 = NULL;
631 #endif
632 	struct net *net;
633 
634 	/* Never send a reset in response to a reset. */
635 	if (th->rst)
636 		return;
637 
638 	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
639 		return;
640 
641 	/* Swap the send and the receive. */
642 	memset(&rep, 0, sizeof(rep));
643 	rep.th.dest   = th->source;
644 	rep.th.source = th->dest;
645 	rep.th.doff   = sizeof(struct tcphdr) / 4;
646 	rep.th.rst    = 1;
647 
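	/* Per RFC 793: if the offending segment carried an ACK, the RST uses
	 * that ACK value as its sequence number; otherwise the RST ACKs
	 * everything the segment occupied in sequence space.
	 */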
648 	if (th->ack) {
649 		rep.th.seq = th->ack_seq;
650 	} else {
651 		rep.th.ack = 1;
652 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
653 				       skb->len - (th->doff << 2));
654 	}
655 
656 	memset(&arg, 0, sizeof(arg));
657 	arg.iov[0].iov_base = (unsigned char *)&rep;
658 	arg.iov[0].iov_len  = sizeof(rep.th);
659 
660 #ifdef CONFIG_TCP_MD5SIG
661 	hash_location = tcp_parse_md5sig_option(th);
662 	if (!sk && hash_location) {
663 		/*
664 		 * The active side is lost. Try to find the listening socket
665 		 * through the source port, and then the md5 key through it.
666 		 * We do not lose security here:
667 		 * the incoming packet is checked against the md5 hash computed
668 		 * with the key we find; no RST is generated if it doesn't match.
669 		 */
670 		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
671 					     &tcp_hashinfo, ip_hdr(skb)->daddr,
672 					     ntohs(th->source), inet_iif(skb));
673 		/* don't send an RST if we can't find a key */
674 		if (!sk1)
675 			return;
676 		rcu_read_lock();
677 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
678 					&ip_hdr(skb)->saddr, AF_INET);
679 		if (!key)
680 			goto release_sk1;
681 
682 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
683 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
684 			goto release_sk1;
685 	} else {
686 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
687 					     &ip_hdr(skb)->saddr,
688 					     AF_INET) : NULL;
689 	}
690 
691 	if (key) {
692 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
693 				   (TCPOPT_NOP << 16) |
694 				   (TCPOPT_MD5SIG << 8) |
695 				   TCPOLEN_MD5SIG);
696 		/* Update length and the length the header thinks exists */
697 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
698 		rep.th.doff = arg.iov[0].iov_len / 4;
699 
700 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
701 				     key, ip_hdr(skb)->saddr,
702 				     ip_hdr(skb)->daddr, &rep.th);
703 	}
704 #endif
705 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
706 				      ip_hdr(skb)->saddr, /* XXX */
707 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
708 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
709 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
710 	/* When the socket is gone, all binding information is lost and
711 	 * routing might fail. There is no choice here: if we force the
712 	 * input interface, we will misroute in case of an asymmetric route.
713 	 */
714 	if (sk)
715 		arg.bound_dev_if = sk->sk_bound_dev_if;
716 
717 	net = dev_net(skb_dst(skb)->dev);
718 	arg.tos = ip_hdr(skb)->tos;
719 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
720 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
721 
722 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
723 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
724 
725 #ifdef CONFIG_TCP_MD5SIG
726 release_sk1:
727 	if (sk1) {
728 		rcu_read_unlock();
729 		sock_put(sk1);
730 	}
731 #endif
732 }
733 
734 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
735    outside of socket context, is certainly ugly. What can I do?
736  */
737 
738 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
739 			    u32 win, u32 ts, int oif,
740 			    struct tcp_md5sig_key *key,
741 			    int reply_flags, u8 tos)
742 {
743 	const struct tcphdr *th = tcp_hdr(skb);
744 	struct {
745 		struct tcphdr th;
746 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
747 #ifdef CONFIG_TCP_MD5SIG
748 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
749 #endif
750 			];
751 	} rep;
752 	struct ip_reply_arg arg;
753 	struct net *net = dev_net(skb_dst(skb)->dev);
754 
755 	memset(&rep.th, 0, sizeof(struct tcphdr));
756 	memset(&arg, 0, sizeof(arg));
757 
758 	arg.iov[0].iov_base = (unsigned char *)&rep;
759 	arg.iov[0].iov_len  = sizeof(rep.th);
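	/* If a timestamp echo was requested, emit NOP,NOP,TIMESTAMP followed
	 * by our current timestamp and the echoed value.
	 */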
760 	if (ts) {
761 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
762 				   (TCPOPT_TIMESTAMP << 8) |
763 				   TCPOLEN_TIMESTAMP);
764 		rep.opt[1] = htonl(tcp_time_stamp);
765 		rep.opt[2] = htonl(ts);
766 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
767 	}
768 
769 	/* Swap the send and the receive. */
770 	rep.th.dest    = th->source;
771 	rep.th.source  = th->dest;
772 	rep.th.doff    = arg.iov[0].iov_len / 4;
773 	rep.th.seq     = htonl(seq);
774 	rep.th.ack_seq = htonl(ack);
775 	rep.th.ack     = 1;
776 	rep.th.window  = htons(win);
777 
778 #ifdef CONFIG_TCP_MD5SIG
779 	if (key) {
780 		int offset = (ts) ? 3 : 0;
781 
782 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
783 					  (TCPOPT_NOP << 16) |
784 					  (TCPOPT_MD5SIG << 8) |
785 					  TCPOLEN_MD5SIG);
786 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
787 		rep.th.doff = arg.iov[0].iov_len/4;
788 
789 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
790 				    key, ip_hdr(skb)->saddr,
791 				    ip_hdr(skb)->daddr, &rep.th);
792 	}
793 #endif
794 	arg.flags = reply_flags;
795 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
796 				      ip_hdr(skb)->saddr, /* XXX */
797 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
798 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
799 	if (oif)
800 		arg.bound_dev_if = oif;
801 	arg.tos = tos;
802 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
803 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
804 
805 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
806 }
807 
808 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
809 {
810 	struct inet_timewait_sock *tw = inet_twsk(sk);
811 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
812 
813 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
814 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
815 			tcptw->tw_ts_recent,
816 			tw->tw_bound_dev_if,
817 			tcp_twsk_md5_key(tcptw),
818 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
819 			tw->tw_tos
820 			);
821 
822 	inet_twsk_put(tw);
823 }
824 
825 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
826 				  struct request_sock *req)
827 {
828 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
829 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
830 	 */
831 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
832 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
833 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
834 			req->ts_recent,
835 			0,
836 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
837 					  AF_INET),
838 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
839 			ip_hdr(skb)->tos);
840 }
841 
842 /*
843  *	Send a SYN-ACK after having received a SYN.
844  *	This still operates on a request_sock only, not on a big
845  *	socket.
846  */
847 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
848 			      struct request_sock *req,
849 			      struct request_values *rvp,
850 			      u16 queue_mapping,
851 			      bool nocache)
852 {
853 	const struct inet_request_sock *ireq = inet_rsk(req);
854 	struct flowi4 fl4;
855 	int err = -1;
856 	struct sk_buff * skb;
857 
858 	/* First, grab a route. */
859 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
860 		return -1;
861 
862 	skb = tcp_make_synack(sk, dst, req, rvp, NULL);
863 
864 	if (skb) {
865 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
866 
867 		skb_set_queue_mapping(skb, queue_mapping);
868 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
869 					    ireq->rmt_addr,
870 					    ireq->opt);
871 		err = net_xmit_eval(err);
872 		if (!tcp_rsk(req)->snt_synack && !err)
873 			tcp_rsk(req)->snt_synack = tcp_time_stamp;
874 	}
875 
876 	return err;
877 }
878 
879 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
880 			      struct request_values *rvp)
881 {
882 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
883 	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
884 }
885 
886 /*
887  *	IPv4 request_sock destructor.
888  */
889 static void tcp_v4_reqsk_destructor(struct request_sock *req)
890 {
891 	kfree(inet_rsk(req)->opt);
892 }
893 
894 /*
895  * Return true if a syncookie should be sent
896  */
897 bool tcp_syn_flood_action(struct sock *sk,
898 			 const struct sk_buff *skb,
899 			 const char *proto)
900 {
901 	const char *msg = "Dropping request";
902 	bool want_cookie = false;
903 	struct listen_sock *lopt;
904 
905 
906 
907 #ifdef CONFIG_SYN_COOKIES
908 	if (sysctl_tcp_syncookies) {
909 		msg = "Sending cookies";
910 		want_cookie = true;
911 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
912 	} else
913 #endif
914 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
915 
916 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
917 	if (!lopt->synflood_warned) {
918 		lopt->synflood_warned = 1;
919 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
920 			proto, ntohs(tcp_hdr(skb)->dest), msg);
921 	}
922 	return want_cookie;
923 }
924 EXPORT_SYMBOL(tcp_syn_flood_action);
925 
926 /*
927  * Save and compile IPv4 options into the request_sock if needed.
928  */
929 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
930 {
931 	const struct ip_options *opt = &(IPCB(skb)->opt);
932 	struct ip_options_rcu *dopt = NULL;
933 
934 	if (opt && opt->optlen) {
935 		int opt_size = sizeof(*dopt) + opt->optlen;
936 
937 		dopt = kmalloc(opt_size, GFP_ATOMIC);
938 		if (dopt) {
939 			if (ip_options_echo(&dopt->opt, skb)) {
940 				kfree(dopt);
941 				dopt = NULL;
942 			}
943 		}
944 	}
945 	return dopt;
946 }
947 
948 #ifdef CONFIG_TCP_MD5SIG
949 /*
950  * RFC2385 MD5 checksumming requires a mapping of
951  * IP address->MD5 Key.
952  * We need to maintain these in the sk structure.
953  */
954 
955 /* Find the Key structure for an address.  */
956 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
957 					 const union tcp_md5_addr *addr,
958 					 int family)
959 {
960 	struct tcp_sock *tp = tcp_sk(sk);
961 	struct tcp_md5sig_key *key;
962 	struct hlist_node *pos;
963 	unsigned int size = sizeof(struct in_addr);
964 	struct tcp_md5sig_info *md5sig;
965 
966 	/* caller either holds rcu_read_lock() or socket lock */
967 	md5sig = rcu_dereference_check(tp->md5sig_info,
968 				       sock_owned_by_user(sk) ||
969 				       lockdep_is_held(&sk->sk_lock.slock));
970 	if (!md5sig)
971 		return NULL;
972 #if IS_ENABLED(CONFIG_IPV6)
973 	if (family == AF_INET6)
974 		size = sizeof(struct in6_addr);
975 #endif
976 	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
977 		if (key->family != family)
978 			continue;
979 		if (!memcmp(&key->addr, addr, size))
980 			return key;
981 	}
982 	return NULL;
983 }
984 EXPORT_SYMBOL(tcp_md5_do_lookup);
985 
986 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
987 					 struct sock *addr_sk)
988 {
989 	union tcp_md5_addr *addr;
990 
991 	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
992 	return tcp_md5_do_lookup(sk, addr, AF_INET);
993 }
994 EXPORT_SYMBOL(tcp_v4_md5_lookup);
995 
996 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
997 						      struct request_sock *req)
998 {
999 	union tcp_md5_addr *addr;
1000 
1001 	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
1002 	return tcp_md5_do_lookup(sk, addr, AF_INET);
1003 }
1004 
1005 /* This can be called on a newly created socket, from other files */
1006 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1007 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1008 {
1009 	/* Add Key to the list */
1010 	struct tcp_md5sig_key *key;
1011 	struct tcp_sock *tp = tcp_sk(sk);
1012 	struct tcp_md5sig_info *md5sig;
1013 
1014 	key = tcp_md5_do_lookup(sk, addr, family);
1015 	if (key) {
1016 		/* Pre-existing entry - just update that one. */
1017 		memcpy(key->key, newkey, newkeylen);
1018 		key->keylen = newkeylen;
1019 		return 0;
1020 	}
1021 
1022 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1023 					   sock_owned_by_user(sk));
1024 	if (!md5sig) {
1025 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1026 		if (!md5sig)
1027 			return -ENOMEM;
1028 
1029 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1030 		INIT_HLIST_HEAD(&md5sig->head);
1031 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1032 	}
1033 
1034 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1035 	if (!key)
1036 		return -ENOMEM;
1037 	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1038 		sock_kfree_s(sk, key, sizeof(*key));
1039 		return -ENOMEM;
1040 	}
1041 
1042 	memcpy(key->key, newkey, newkeylen);
1043 	key->keylen = newkeylen;
1044 	key->family = family;
1045 	memcpy(&key->addr, addr,
1046 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1047 				      sizeof(struct in_addr));
1048 	hlist_add_head_rcu(&key->node, &md5sig->head);
1049 	return 0;
1050 }
1051 EXPORT_SYMBOL(tcp_md5_do_add);
1052 
1053 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1054 {
1055 	struct tcp_sock *tp = tcp_sk(sk);
1056 	struct tcp_md5sig_key *key;
1057 	struct tcp_md5sig_info *md5sig;
1058 
1059 	key = tcp_md5_do_lookup(sk, addr, family);
1060 	if (!key)
1061 		return -ENOENT;
1062 	hlist_del_rcu(&key->node);
1063 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1064 	kfree_rcu(key, rcu);
1065 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1066 					   sock_owned_by_user(sk));
1067 	if (hlist_empty(&md5sig->head))
1068 		tcp_free_md5sig_pool();
1069 	return 0;
1070 }
1071 EXPORT_SYMBOL(tcp_md5_do_del);
1072 
1073 void tcp_clear_md5_list(struct sock *sk)
1074 {
1075 	struct tcp_sock *tp = tcp_sk(sk);
1076 	struct tcp_md5sig_key *key;
1077 	struct hlist_node *pos, *n;
1078 	struct tcp_md5sig_info *md5sig;
1079 
1080 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1081 
1082 	if (!hlist_empty(&md5sig->head))
1083 		tcp_free_md5sig_pool();
1084 	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1085 		hlist_del_rcu(&key->node);
1086 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1087 		kfree_rcu(key, rcu);
1088 	}
1089 }
1090 
1091 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1092 				 int optlen)
1093 {
1094 	struct tcp_md5sig cmd;
1095 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1096 
1097 	if (optlen < sizeof(cmd))
1098 		return -EINVAL;
1099 
1100 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1101 		return -EFAULT;
1102 
1103 	if (sin->sin_family != AF_INET)
1104 		return -EINVAL;
1105 
1106 	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1107 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1108 				      AF_INET);
1109 
1110 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1111 		return -EINVAL;
1112 
1113 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1114 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1115 			      GFP_KERNEL);
1116 }
1117 
1118 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1119 					__be32 daddr, __be32 saddr, int nbytes)
1120 {
1121 	struct tcp4_pseudohdr *bp;
1122 	struct scatterlist sg;
1123 
1124 	bp = &hp->md5_blk.ip4;
1125 
1126 	/*
1127 	 * 1. the TCP pseudo-header (in the order: source IP address,
1128 	 * destination IP address, zero-padded protocol number, and
1129 	 * segment length)
1130 	 */
1131 	bp->saddr = saddr;
1132 	bp->daddr = daddr;
1133 	bp->pad = 0;
1134 	bp->protocol = IPPROTO_TCP;
1135 	bp->len = cpu_to_be16(nbytes);
1136 
1137 	sg_init_one(&sg, bp, sizeof(*bp));
1138 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1139 }
1140 
1141 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1142 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1143 {
1144 	struct tcp_md5sig_pool *hp;
1145 	struct hash_desc *desc;
1146 
1147 	hp = tcp_get_md5sig_pool();
1148 	if (!hp)
1149 		goto clear_hash_noput;
1150 	desc = &hp->md5_desc;
1151 
1152 	if (crypto_hash_init(desc))
1153 		goto clear_hash;
1154 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1155 		goto clear_hash;
1156 	if (tcp_md5_hash_header(hp, th))
1157 		goto clear_hash;
1158 	if (tcp_md5_hash_key(hp, key))
1159 		goto clear_hash;
1160 	if (crypto_hash_final(desc, md5_hash))
1161 		goto clear_hash;
1162 
1163 	tcp_put_md5sig_pool();
1164 	return 0;
1165 
1166 clear_hash:
1167 	tcp_put_md5sig_pool();
1168 clear_hash_noput:
1169 	memset(md5_hash, 0, 16);
1170 	return 1;
1171 }
1172 
1173 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1174 			const struct sock *sk, const struct request_sock *req,
1175 			const struct sk_buff *skb)
1176 {
1177 	struct tcp_md5sig_pool *hp;
1178 	struct hash_desc *desc;
1179 	const struct tcphdr *th = tcp_hdr(skb);
1180 	__be32 saddr, daddr;
1181 
1182 	if (sk) {
1183 		saddr = inet_sk(sk)->inet_saddr;
1184 		daddr = inet_sk(sk)->inet_daddr;
1185 	} else if (req) {
1186 		saddr = inet_rsk(req)->loc_addr;
1187 		daddr = inet_rsk(req)->rmt_addr;
1188 	} else {
1189 		const struct iphdr *iph = ip_hdr(skb);
1190 		saddr = iph->saddr;
1191 		daddr = iph->daddr;
1192 	}
1193 
1194 	hp = tcp_get_md5sig_pool();
1195 	if (!hp)
1196 		goto clear_hash_noput;
1197 	desc = &hp->md5_desc;
1198 
1199 	if (crypto_hash_init(desc))
1200 		goto clear_hash;
1201 
1202 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1203 		goto clear_hash;
1204 	if (tcp_md5_hash_header(hp, th))
1205 		goto clear_hash;
1206 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1207 		goto clear_hash;
1208 	if (tcp_md5_hash_key(hp, key))
1209 		goto clear_hash;
1210 	if (crypto_hash_final(desc, md5_hash))
1211 		goto clear_hash;
1212 
1213 	tcp_put_md5sig_pool();
1214 	return 0;
1215 
1216 clear_hash:
1217 	tcp_put_md5sig_pool();
1218 clear_hash_noput:
1219 	memset(md5_hash, 0, 16);
1220 	return 1;
1221 }
1222 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1223 
1224 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1225 {
1226 	/*
1227 	 * This gets called for each TCP segment that arrives
1228 	 * so we want to be efficient.
1229 	 * We have 3 drop cases:
1230 	 * o No MD5 hash and one expected.
1231 	 * o MD5 hash and we're not expecting one.
1232 	 * o MD5 hash and it's wrong.
1233 	 */
1234 	const __u8 *hash_location = NULL;
1235 	struct tcp_md5sig_key *hash_expected;
1236 	const struct iphdr *iph = ip_hdr(skb);
1237 	const struct tcphdr *th = tcp_hdr(skb);
1238 	int genhash;
1239 	unsigned char newhash[16];
1240 
1241 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1242 					  AF_INET);
1243 	hash_location = tcp_parse_md5sig_option(th);
1244 
1245 	/* We've parsed the options - do we have a hash? */
1246 	if (!hash_expected && !hash_location)
1247 		return false;
1248 
1249 	if (hash_expected && !hash_location) {
1250 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1251 		return true;
1252 	}
1253 
1254 	if (!hash_expected && hash_location) {
1255 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1256 		return true;
1257 	}
1258 
1259 	/* Okay, we have both hash_expected and hash_location -
1260 	 * so we need to calculate the hash and compare.
1261 	 */
1262 	genhash = tcp_v4_md5_hash_skb(newhash,
1263 				      hash_expected,
1264 				      NULL, NULL, skb);
1265 
1266 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1267 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1268 				     &iph->saddr, ntohs(th->source),
1269 				     &iph->daddr, ntohs(th->dest),
1270 				     genhash ? " tcp_v4_calc_md5_hash failed"
1271 				     : "");
1272 		return true;
1273 	}
1274 	return false;
1275 }
1276 
1277 #endif
1278 
1279 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1280 	.family		=	PF_INET,
1281 	.obj_size	=	sizeof(struct tcp_request_sock),
1282 	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1283 	.send_ack	=	tcp_v4_reqsk_send_ack,
1284 	.destructor	=	tcp_v4_reqsk_destructor,
1285 	.send_reset	=	tcp_v4_send_reset,
1286 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1287 };
1288 
1289 #ifdef CONFIG_TCP_MD5SIG
1290 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1291 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1292 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1293 };
1294 #endif
1295 
1296 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1297 			       struct request_sock *req,
1298 			       struct tcp_fastopen_cookie *foc,
1299 			       struct tcp_fastopen_cookie *valid_foc)
1300 {
1301 	bool skip_cookie = false;
1302 	struct fastopen_queue *fastopenq;
1303 
1304 	if (likely(!fastopen_cookie_present(foc))) {
1305 		/* See include/net/tcp.h for the meaning of these knobs */
1306 		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1307 		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1308 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1309 			skip_cookie = true; /* no cookie to validate */
1310 		else
1311 			return false;
1312 	}
1313 	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1314 	/* A FO option is present; bump the counter. */
1315 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
1316 
1317 	/* Make sure the listener has enabled fastopen, and we don't
1318 	 * exceed the max # of pending TFO requests allowed before trying
1319 	 * to validate the cookie, in order to avoid burning CPU cycles
1320 	 * unnecessarily.
1321 	 *
1322 	 * XXX (TFO) - The implication of checking the max_qlen before
1323 	 * processing a cookie request is that clients can't differentiate
1324 	 * between qlen overflow causing Fast Open to be disabled
1325 	 * temporarily vs a server not supporting Fast Open at all.
1326 	 */
1327 	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1328 	    fastopenq == NULL || fastopenq->max_qlen == 0)
1329 		return false;
1330 
1331 	if (fastopenq->qlen >= fastopenq->max_qlen) {
1332 		struct request_sock *req1;
1333 		spin_lock(&fastopenq->lock);
1334 		req1 = fastopenq->rskq_rst_head;
1335 		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1336 			spin_unlock(&fastopenq->lock);
1337 			NET_INC_STATS_BH(sock_net(sk),
1338 			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1339 			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
1340 			foc->len = -1;
1341 			return false;
1342 		}
1343 		fastopenq->rskq_rst_head = req1->dl_next;
1344 		fastopenq->qlen--;
1345 		spin_unlock(&fastopenq->lock);
1346 		reqsk_free(req1);
1347 	}
1348 	if (skip_cookie) {
1349 		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1350 		return true;
1351 	}
1352 	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1353 		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1354 			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1355 			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1356 			    memcmp(&foc->val[0], &valid_foc->val[0],
1357 			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
1358 				return false;
1359 			valid_foc->len = -1;
1360 		}
1361 		/* Acknowledge the data received from the peer. */
1362 		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1363 		return true;
1364 	} else if (foc->len == 0) { /* Client requesting a cookie */
1365 		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1366 		NET_INC_STATS_BH(sock_net(sk),
1367 		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1368 	} else {
1369 		/* Client sent a cookie with wrong size. Treat it
1370 		 * the same as invalid and return a valid one.
1371 		 */
1372 		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1373 	}
1374 	return false;
1375 }
1376 
1377 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1378 				    struct sk_buff *skb,
1379 				    struct sk_buff *skb_synack,
1380 				    struct request_sock *req,
1381 				    struct request_values *rvp)
1382 {
1383 	struct tcp_sock *tp = tcp_sk(sk);
1384 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1385 	const struct inet_request_sock *ireq = inet_rsk(req);
1386 	struct sock *child;
1387 	int err;
1388 
1389 	req->retrans = 0;
1390 	req->sk = NULL;
1391 
1392 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1393 	if (child == NULL) {
1394 		NET_INC_STATS_BH(sock_net(sk),
1395 				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1396 		kfree_skb(skb_synack);
1397 		return -1;
1398 	}
1399 	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1400 				    ireq->rmt_addr, ireq->opt);
1401 	err = net_xmit_eval(err);
1402 	if (!err)
1403 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1404 	/* XXX (TFO) - is it ok to ignore error and continue? */
1405 
1406 	spin_lock(&queue->fastopenq->lock);
1407 	queue->fastopenq->qlen++;
1408 	spin_unlock(&queue->fastopenq->lock);
1409 
1410 	/* Initialize the child socket. Have to fix some values to take
1411 	 * into account that the child is a Fast Open socket and is created
1412 	 * only out of the bits carried in the SYN packet.
1413 	 */
1414 	tp = tcp_sk(child);
1415 
1416 	tp->fastopen_rsk = req;
1417 	/* Do a hold on the listener sk so that if the listener is being
1418 	 * closed, the child that has been accepted can live on and still
1419 	 * access listen_lock.
1420 	 */
1421 	sock_hold(sk);
1422 	tcp_rsk(req)->listener = sk;
1423 
1424 	/* RFC1323: The window in SYN & SYN/ACK segments is never
1425 	 * scaled. So correct it appropriately.
1426 	 */
1427 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1428 
1429 	/* Activate the retrans timer so that SYNACK can be retransmitted.
1430 	 * The request socket is not added to the SYN table of the parent
1431 	 * because it's been added to the accept queue directly.
1432 	 */
1433 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1434 	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1435 
1436 	/* Add the child socket directly into the accept queue */
1437 	inet_csk_reqsk_queue_add(sk, req, child);
1438 
1439 	/* Now finish processing the fastopen child socket. */
1440 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
1441 	tcp_init_congestion_control(child);
1442 	tcp_mtup_init(child);
1443 	tcp_init_buffer_space(child);
1444 	tcp_init_metrics(child);
1445 
1446 	/* Queue the data carried in the SYN packet. We need to first
1447 	 * bump skb's refcnt because the caller will attempt to free it.
1448 	 *
1449 	 * XXX (TFO) - we honor a zero-payload TFO request for now.
1450 	 * (Any reason not to?)
1451 	 */
1452 	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1453 		/* Don't queue the skb if there is no payload in SYN.
1454 		 * XXX (TFO) - How about SYN+FIN?
1455 		 */
1456 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1457 	} else {
1458 		skb = skb_get(skb);
1459 		skb_dst_drop(skb);
1460 		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
1461 		skb_set_owner_r(skb, child);
1462 		__skb_queue_tail(&child->sk_receive_queue, skb);
1463 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1464 	}
1465 	sk->sk_data_ready(sk, 0);
1466 	bh_unlock_sock(child);
1467 	sock_put(child);
1468 	WARN_ON(req->sk == NULL);
1469 	return 0;
1470 }
1471 
1472 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1473 {
1474 	struct tcp_extend_values tmp_ext;
1475 	struct tcp_options_received tmp_opt;
1476 	const u8 *hash_location;
1477 	struct request_sock *req;
1478 	struct inet_request_sock *ireq;
1479 	struct tcp_sock *tp = tcp_sk(sk);
1480 	struct dst_entry *dst = NULL;
1481 	__be32 saddr = ip_hdr(skb)->saddr;
1482 	__be32 daddr = ip_hdr(skb)->daddr;
1483 	__u32 isn = TCP_SKB_CB(skb)->when;
1484 	bool want_cookie = false;
1485 	struct flowi4 fl4;
1486 	struct tcp_fastopen_cookie foc = { .len = -1 };
1487 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1488 	struct sk_buff *skb_synack;
1489 	int do_fastopen;
1490 
1491 	/* Never answer SYNs sent to broadcast or multicast addresses */
1492 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1493 		goto drop;
1494 
1495 	/* TW buckets are converted to open requests without
1496 	 * limitation; they conserve resources and the peer is
1497 	 * evidently a real one.
1498 	 */
1499 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1500 		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1501 		if (!want_cookie)
1502 			goto drop;
1503 	}
1504 
1505 	/* Accept backlog is full. If we have already queued enough
1506 	 * warm entries in the syn queue, drop the request. That is better
1507 	 * than clogging the syn queue with openreqs whose timeouts increase
1508 	 * exponentially.
1509 	 */
1510 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1511 		goto drop;
1512 
1513 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1514 	if (!req)
1515 		goto drop;
1516 
1517 #ifdef CONFIG_TCP_MD5SIG
1518 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1519 #endif
1520 
1521 	tcp_clear_options(&tmp_opt);
1522 	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1523 	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1524 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1525 	    want_cookie ? NULL : &foc);
1526 
1527 	if (tmp_opt.cookie_plus > 0 &&
1528 	    tmp_opt.saw_tstamp &&
1529 	    !tp->rx_opt.cookie_out_never &&
1530 	    (sysctl_tcp_cookie_size > 0 ||
1531 	     (tp->cookie_values != NULL &&
1532 	      tp->cookie_values->cookie_desired > 0))) {
1533 		u8 *c;
1534 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1535 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1536 
1537 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1538 			goto drop_and_release;
1539 
1540 		/* Secret recipe starts with IP addresses */
1541 		*mess++ ^= (__force u32)daddr;
1542 		*mess++ ^= (__force u32)saddr;
1543 
1544 		/* plus variable length Initiator Cookie */
1545 		c = (u8 *)mess;
1546 		while (l-- > 0)
1547 			*c++ ^= *hash_location++;
1548 
1549 		want_cookie = false;	/* not our kind of cookie */
1550 		tmp_ext.cookie_out_never = 0; /* false */
1551 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1552 	} else if (!tp->rx_opt.cookie_in_always) {
1553 		/* redundant indications, but ensure initialization. */
1554 		tmp_ext.cookie_out_never = 1; /* true */
1555 		tmp_ext.cookie_plus = 0;
1556 	} else {
1557 		goto drop_and_release;
1558 	}
1559 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1560 
1561 	if (want_cookie && !tmp_opt.saw_tstamp)
1562 		tcp_clear_options(&tmp_opt);
1563 
1564 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1565 	tcp_openreq_init(req, &tmp_opt, skb);
1566 
1567 	ireq = inet_rsk(req);
1568 	ireq->loc_addr = daddr;
1569 	ireq->rmt_addr = saddr;
1570 	ireq->no_srccheck = inet_sk(sk)->transparent;
1571 	ireq->opt = tcp_v4_save_options(skb);
1572 
1573 	if (security_inet_conn_request(sk, skb, req))
1574 		goto drop_and_free;
1575 
1576 	if (!want_cookie || tmp_opt.tstamp_ok)
1577 		TCP_ECN_create_request(req, skb);
1578 
1579 	if (want_cookie) {
1580 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1581 		req->cookie_ts = tmp_opt.tstamp_ok;
1582 	} else if (!isn) {
1583 		/* VJ's idea. We save the last timestamp seen
1584 		 * from the destination in the peer table when entering
1585 		 * TIME-WAIT state, and check against it before
1586 		 * accepting a new connection request.
1587 		 *
1588 		 * If "isn" is not zero, this request hit a live
1589 		 * timewait bucket, so all the necessary checks
1590 		 * are made in the function processing the timewait state.
1591 		 */
1592 		if (tmp_opt.saw_tstamp &&
1593 		    tcp_death_row.sysctl_tw_recycle &&
1594 		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1595 		    fl4.daddr == saddr) {
1596 			if (!tcp_peer_is_proven(req, dst, true)) {
1597 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1598 				goto drop_and_release;
1599 			}
1600 		}
1601 		/* Kill the following clause, if you dislike this way. */
1602 		else if (!sysctl_tcp_syncookies &&
1603 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1604 			  (sysctl_max_syn_backlog >> 2)) &&
1605 			 !tcp_peer_is_proven(req, dst, false)) {
1606 			/* Without syncookies, the last quarter of the
1607 			 * backlog is filled only with destinations
1608 			 * proven to be alive.
1609 			 * This means we continue to communicate with
1610 			 * destinations already remembered at the
1611 			 * moment the synflood started.
1612 			 */
1613 			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1614 				       &saddr, ntohs(tcp_hdr(skb)->source));
1615 			goto drop_and_release;
1616 		}
1617 
1618 		isn = tcp_v4_init_sequence(skb);
1619 	}
1620 	tcp_rsk(req)->snt_isn = isn;
1621 
1622 	if (dst == NULL) {
1623 		dst = inet_csk_route_req(sk, &fl4, req);
1624 		if (dst == NULL)
1625 			goto drop_and_free;
1626 	}
1627 	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1628 
1629 	/* We don't call tcp_v4_send_synack() directly because we need
1630 	 * to make sure a child socket can be created successfully before
1631 	 * sending back synack!
1632 	 *
1633 	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1634 	 * (or better yet, call tcp_send_synack() in the child context
1635 	 * directly, but will have to fix a bunch of other code first)
1636 	 * after syn_recv_sock() except one will need to first fix the
1637 	 * latter to remove its dependency on the current implementation
1638 	 * of tcp_v4_send_synack()->tcp_select_initial_window().
1639 	 */
1640 	skb_synack = tcp_make_synack(sk, dst, req,
1641 	    (struct request_values *)&tmp_ext,
1642 	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1643 
1644 	if (skb_synack) {
1645 		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1646 		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1647 	} else
1648 		goto drop_and_free;
1649 
1650 	if (likely(!do_fastopen)) {
1651 		int err;
1652 		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1653 		     ireq->rmt_addr, ireq->opt);
1654 		err = net_xmit_eval(err);
1655 		if (err || want_cookie)
1656 			goto drop_and_free;
1657 
1658 		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1659 		tcp_rsk(req)->listener = NULL;
1660 		/* Add the request_sock to the SYN table */
1661 		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1662 		if (fastopen_cookie_present(&foc) && foc.len != 0)
1663 			NET_INC_STATS_BH(sock_net(sk),
1664 			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1665 	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1666 	    (struct request_values *)&tmp_ext))
1667 		goto drop_and_free;
1668 
1669 	return 0;
1670 
1671 drop_and_release:
1672 	dst_release(dst);
1673 drop_and_free:
1674 	reqsk_free(req);
1675 drop:
1676 	return 0;
1677 }
1678 EXPORT_SYMBOL(tcp_v4_conn_request);
1679 
1680 
1681 /*
1682  * The three-way handshake has completed - we got a valid ACK -
1683  * now create the new socket.
1684  */
1685 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1686 				  struct request_sock *req,
1687 				  struct dst_entry *dst)
1688 {
1689 	struct inet_request_sock *ireq;
1690 	struct inet_sock *newinet;
1691 	struct tcp_sock *newtp;
1692 	struct sock *newsk;
1693 #ifdef CONFIG_TCP_MD5SIG
1694 	struct tcp_md5sig_key *key;
1695 #endif
1696 	struct ip_options_rcu *inet_opt;
1697 
1698 	if (sk_acceptq_is_full(sk))
1699 		goto exit_overflow;
1700 
1701 	newsk = tcp_create_openreq_child(sk, req, skb);
1702 	if (!newsk)
1703 		goto exit_nonewsk;
1704 
1705 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1706 	inet_sk_rx_dst_set(newsk, skb);
1707 
1708 	newtp		      = tcp_sk(newsk);
1709 	newinet		      = inet_sk(newsk);
1710 	ireq		      = inet_rsk(req);
1711 	newinet->inet_daddr   = ireq->rmt_addr;
1712 	newinet->inet_rcv_saddr = ireq->loc_addr;
1713 	newinet->inet_saddr	      = ireq->loc_addr;
1714 	inet_opt	      = ireq->opt;
1715 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1716 	ireq->opt	      = NULL;
1717 	newinet->mc_index     = inet_iif(skb);
1718 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1719 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1720 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1721 	if (inet_opt)
1722 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1723 	newinet->inet_id = newtp->write_seq ^ jiffies;
1724 
1725 	if (!dst) {
1726 		dst = inet_csk_route_child_sock(sk, newsk, req);
1727 		if (!dst)
1728 			goto put_and_exit;
1729 	} else {
1730 		/* syncookie case : see end of cookie_v4_check() */
1731 	}
1732 	sk_setup_caps(newsk, dst);
1733 
1734 	tcp_mtup_init(newsk);
1735 	tcp_sync_mss(newsk, dst_mtu(dst));
1736 	newtp->advmss = dst_metric_advmss(dst);
1737 	if (tcp_sk(sk)->rx_opt.user_mss &&
1738 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1739 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1740 
1741 	tcp_initialize_rcv_mss(newsk);
1742 	tcp_synack_rtt_meas(newsk, req);
1743 	newtp->total_retrans = req->retrans;
1744 
1745 #ifdef CONFIG_TCP_MD5SIG
1746 	/* Copy over the MD5 key from the original socket */
1747 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1748 				AF_INET);
1749 	if (key != NULL) {
1750 		/*
1751 		 * We're using one, so create a matching key
1752 		 * on the newsk structure. If we fail to get
1753 		 * memory, then we end up not copying the key
1754 		 * across. Shucks.
1755 		 */
1756 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1757 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1758 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1759 	}
1760 #endif
1761 
1762 	if (__inet_inherit_port(sk, newsk) < 0)
1763 		goto put_and_exit;
1764 	__inet_hash_nolisten(newsk, NULL);
1765 
1766 	return newsk;
1767 
1768 exit_overflow:
1769 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1770 exit_nonewsk:
1771 	dst_release(dst);
1772 exit:
1773 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1774 	return NULL;
1775 put_and_exit:
1776 	tcp_clear_xmit_timers(newsk);
1777 	tcp_cleanup_congestion_control(newsk);
1778 	bh_unlock_sock(newsk);
1779 	sock_put(newsk);
1780 	goto exit;
1781 }
1782 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1783 
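/* A brief note on the helper below (annotation, derived from the code that
 * follows): handle a segment received on a listening socket by first
 * checking the SYN queue for a matching request_sock, then the established
 * hash in case the child socket already exists, and finally (when SYN
 * cookies are enabled) trying to validate a pure ACK as a cookie.  It
 * returns the socket to continue processing on, or NULL to drop.
 */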
1784 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1785 {
1786 	struct tcphdr *th = tcp_hdr(skb);
1787 	const struct iphdr *iph = ip_hdr(skb);
1788 	struct sock *nsk;
1789 	struct request_sock **prev;
1790 	/* Find possible connection requests. */
1791 	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1792 						       iph->saddr, iph->daddr);
1793 	if (req)
1794 		return tcp_check_req(sk, skb, req, prev, false);
1795 
1796 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1797 			th->source, iph->daddr, th->dest, inet_iif(skb));
1798 
1799 	if (nsk) {
1800 		if (nsk->sk_state != TCP_TIME_WAIT) {
1801 			bh_lock_sock(nsk);
1802 			return nsk;
1803 		}
1804 		inet_twsk_put(inet_twsk(nsk));
1805 		return NULL;
1806 	}
1807 
1808 #ifdef CONFIG_SYN_COOKIES
1809 	if (!th->syn)
1810 		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1811 #endif
1812 	return sk;
1813 }
1814 
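/* Annotation: initialize or verify the TCP checksum of an incoming skb.
 * A valid CHECKSUM_COMPLETE value from hardware is accepted here;
 * otherwise skb->csum is seeded with the pseudo-header sum so later code
 * can finish the verification, and short packets (<= 76 bytes) are checked
 * in full right away.
 */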
1815 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1816 {
1817 	const struct iphdr *iph = ip_hdr(skb);
1818 
1819 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1820 		if (!tcp_v4_check(skb->len, iph->saddr,
1821 				  iph->daddr, skb->csum)) {
1822 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1823 			return 0;
1824 		}
1825 	}
1826 
1827 	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1828 				       skb->len, IPPROTO_TCP, 0);
1829 
1830 	if (skb->len <= 76) {
1831 		return __skb_checksum_complete(skb);
1832 	}
1833 	return 0;
1834 }
1835 
1836 
1837 /* The socket must have its spinlock held when we get
1838  * here.
1839  *
1840  * We have a potential double-lock case here, so even when
1841  * doing backlog processing we use the BH locking scheme.
1842  * This is because we cannot sleep with the original spinlock
1843  * held.
1844  */
1845 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1846 {
1847 	struct sock *rsk;
1848 #ifdef CONFIG_TCP_MD5SIG
1849 	/*
1850 	 * We really want to reject the packet as early as possible
1851 	 * if:
1852 	 *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1853 	 *  o There is an MD5 option and we're not expecting one
1854 	 */
1855 	if (tcp_v4_inbound_md5_hash(sk, skb))
1856 		goto discard;
1857 #endif
1858 
1859 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1860 		struct dst_entry *dst = sk->sk_rx_dst;
1861 
1862 		sock_rps_save_rxhash(sk, skb);
1863 		if (dst) {
1864 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1865 			    dst->ops->check(dst, 0) == NULL) {
1866 				dst_release(dst);
1867 				sk->sk_rx_dst = NULL;
1868 			}
1869 		}
1870 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1871 			rsk = sk;
1872 			goto reset;
1873 		}
1874 		return 0;
1875 	}
1876 
1877 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1878 		goto csum_err;
1879 
1880 	if (sk->sk_state == TCP_LISTEN) {
1881 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1882 		if (!nsk)
1883 			goto discard;
1884 
1885 		if (nsk != sk) {
1886 			sock_rps_save_rxhash(nsk, skb);
1887 			if (tcp_child_process(sk, nsk, skb)) {
1888 				rsk = nsk;
1889 				goto reset;
1890 			}
1891 			return 0;
1892 		}
1893 	} else
1894 		sock_rps_save_rxhash(sk, skb);
1895 
1896 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1897 		rsk = sk;
1898 		goto reset;
1899 	}
1900 	return 0;
1901 
1902 reset:
1903 	tcp_v4_send_reset(rsk, skb);
1904 discard:
1905 	kfree_skb(skb);
1906 	/* Be careful here. If this function gets more complicated and
1907 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1908 	 * might be destroyed here. This current version compiles correctly,
1909 	 * but you have been warned.
1910 	 */
1911 	return 0;
1912 
1913 csum_err:
1914 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1915 	goto discard;
1916 }
1917 EXPORT_SYMBOL(tcp_v4_do_rcv);
1918 
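/* Annotation: early demultiplex hook.  When an established socket matches
 * the incoming segment, cache it on the skb (released via sock_edemux) and,
 * if the cached rx dst is still valid for this interface, attach it so the
 * normal route lookup can typically be skipped.
 */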
1919 void tcp_v4_early_demux(struct sk_buff *skb)
1920 {
1921 	struct net *net = dev_net(skb->dev);
1922 	const struct iphdr *iph;
1923 	const struct tcphdr *th;
1924 	struct sock *sk;
1925 
1926 	if (skb->pkt_type != PACKET_HOST)
1927 		return;
1928 
1929 	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1930 		return;
1931 
1932 	iph = ip_hdr(skb);
1933 	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1934 
1935 	if (th->doff < sizeof(struct tcphdr) / 4)
1936 		return;
1937 
1938 	sk = __inet_lookup_established(net, &tcp_hashinfo,
1939 				       iph->saddr, th->source,
1940 				       iph->daddr, ntohs(th->dest),
1941 				       skb->skb_iif);
1942 	if (sk) {
1943 		skb->sk = sk;
1944 		skb->destructor = sock_edemux;
1945 		if (sk->sk_state != TCP_TIME_WAIT) {
1946 			struct dst_entry *dst = sk->sk_rx_dst;
1947 
1948 			if (dst)
1949 				dst = dst_check(dst, 0);
1950 			if (dst &&
1951 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1952 				skb_dst_set_noref(skb, dst);
1953 		}
1954 	}
1955 }
1956 
1957 /*
1958  *	From tcp_input.c
1959  */
1960 
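/* Annotation: main IPv4 TCP receive entry point.  It validates the header
 * and checksum, fills in the TCP control block, looks up the owning socket
 * and then either processes the segment directly (tcp_v4_do_rcv), queues it
 * on the prequeue, or, if the socket is owned by the user, pushes it onto
 * the backlog to be handled when the socket lock is released.
 */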
1961 int tcp_v4_rcv(struct sk_buff *skb)
1962 {
1963 	const struct iphdr *iph;
1964 	const struct tcphdr *th;
1965 	struct sock *sk;
1966 	int ret;
1967 	struct net *net = dev_net(skb->dev);
1968 
1969 	if (skb->pkt_type != PACKET_HOST)
1970 		goto discard_it;
1971 
1972 	/* Count it even if it's bad */
1973 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1974 
1975 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1976 		goto discard_it;
1977 
1978 	th = tcp_hdr(skb);
1979 
1980 	if (th->doff < sizeof(struct tcphdr) / 4)
1981 		goto bad_packet;
1982 	if (!pskb_may_pull(skb, th->doff * 4))
1983 		goto discard_it;
1984 
1985 	/* An explanation is required here, I think.
1986 	 * Packet length and doff are validated by header prediction,
1987 	 * provided the case of th->doff==0 is eliminated.
1988 	 * So, we defer the checks. */
1989 	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1990 		goto bad_packet;
1991 
1992 	th = tcp_hdr(skb);
1993 	iph = ip_hdr(skb);
1994 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1995 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1996 				    skb->len - th->doff * 4);
1997 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1998 	TCP_SKB_CB(skb)->when	 = 0;
1999 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2000 	TCP_SKB_CB(skb)->sacked	 = 0;
2001 
2002 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
2003 	if (!sk)
2004 		goto no_tcp_socket;
2005 
2006 process:
2007 	if (sk->sk_state == TCP_TIME_WAIT)
2008 		goto do_time_wait;
2009 
2010 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2011 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2012 		goto discard_and_relse;
2013 	}
2014 
2015 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2016 		goto discard_and_relse;
2017 	nf_reset(skb);
2018 
2019 	if (sk_filter(sk, skb))
2020 		goto discard_and_relse;
2021 
2022 	skb->dev = NULL;
2023 
2024 	bh_lock_sock_nested(sk);
2025 	ret = 0;
2026 	if (!sock_owned_by_user(sk)) {
2027 #ifdef CONFIG_NET_DMA
2028 		struct tcp_sock *tp = tcp_sk(sk);
2029 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2030 			tp->ucopy.dma_chan = net_dma_find_channel();
2031 		if (tp->ucopy.dma_chan)
2032 			ret = tcp_v4_do_rcv(sk, skb);
2033 		else
2034 #endif
2035 		{
2036 			if (!tcp_prequeue(sk, skb))
2037 				ret = tcp_v4_do_rcv(sk, skb);
2038 		}
2039 	} else if (unlikely(sk_add_backlog(sk, skb,
2040 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
2041 		bh_unlock_sock(sk);
2042 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2043 		goto discard_and_relse;
2044 	}
2045 	bh_unlock_sock(sk);
2046 
2047 	sock_put(sk);
2048 
2049 	return ret;
2050 
2051 no_tcp_socket:
2052 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2053 		goto discard_it;
2054 
2055 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2056 bad_packet:
2057 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2058 	} else {
2059 		tcp_v4_send_reset(NULL, skb);
2060 	}
2061 
2062 discard_it:
2063 	/* Discard frame. */
2064 	kfree_skb(skb);
2065 	return 0;
2066 
2067 discard_and_relse:
2068 	sock_put(sk);
2069 	goto discard_it;
2070 
2071 do_time_wait:
2072 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2073 		inet_twsk_put(inet_twsk(sk));
2074 		goto discard_it;
2075 	}
2076 
2077 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2078 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2079 		inet_twsk_put(inet_twsk(sk));
2080 		goto discard_it;
2081 	}
2082 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2083 	case TCP_TW_SYN: {
2084 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2085 							&tcp_hashinfo,
2086 							iph->daddr, th->dest,
2087 							inet_iif(skb));
2088 		if (sk2) {
2089 			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2090 			inet_twsk_put(inet_twsk(sk));
2091 			sk = sk2;
2092 			goto process;
2093 		}
2094 		/* Fall through to ACK */
2095 	}
2096 	case TCP_TW_ACK:
2097 		tcp_v4_timewait_ack(sk, skb);
2098 		break;
2099 	case TCP_TW_RST:
2100 		goto no_tcp_socket;
2101 	case TCP_TW_SUCCESS:;
2102 	}
2103 	goto discard_it;
2104 }
2105 
2106 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2107 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
2108 	.twsk_unique	= tcp_twsk_unique,
2109 	.twsk_destructor= tcp_twsk_destructor,
2110 };
2111 
2112 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2113 {
2114 	struct dst_entry *dst = skb_dst(skb);
2115 
2116 	dst_hold(dst);
2117 	sk->sk_rx_dst = dst;
2118 	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2119 }
2120 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2121 
2122 const struct inet_connection_sock_af_ops ipv4_specific = {
2123 	.queue_xmit	   = ip_queue_xmit,
2124 	.send_check	   = tcp_v4_send_check,
2125 	.rebuild_header	   = inet_sk_rebuild_header,
2126 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2127 	.conn_request	   = tcp_v4_conn_request,
2128 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
2129 	.net_header_len	   = sizeof(struct iphdr),
2130 	.setsockopt	   = ip_setsockopt,
2131 	.getsockopt	   = ip_getsockopt,
2132 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
2133 	.sockaddr_len	   = sizeof(struct sockaddr_in),
2134 	.bind_conflict	   = inet_csk_bind_conflict,
2135 #ifdef CONFIG_COMPAT
2136 	.compat_setsockopt = compat_ip_setsockopt,
2137 	.compat_getsockopt = compat_ip_getsockopt,
2138 #endif
2139 };
2140 EXPORT_SYMBOL(ipv4_specific);
2141 
2142 #ifdef CONFIG_TCP_MD5SIG
2143 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2144 	.md5_lookup		= tcp_v4_md5_lookup,
2145 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2146 	.md5_parse		= tcp_v4_parse_md5_keys,
2147 };
2148 #endif
2149 
2150 /* NOTE: A lot of things are set to zero explicitly by the call to
2151  *       sk_alloc(), so they need not be done here.
2152  */
2153 static int tcp_v4_init_sock(struct sock *sk)
2154 {
2155 	struct inet_connection_sock *icsk = inet_csk(sk);
2156 
2157 	tcp_init_sock(sk);
2158 
2159 	icsk->icsk_af_ops = &ipv4_specific;
2160 
2161 #ifdef CONFIG_TCP_MD5SIG
2162 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2163 #endif
2164 
2165 	return 0;
2166 }
2167 
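/* Annotation: release all TCP-specific state when a socket is destroyed:
 * timers, congestion control, the write/out-of-order/prequeue queues, any
 * MD5 keys, the bound port, cookie values and a pending Fast Open request
 * left over from an aborted connect().
 */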
2168 void tcp_v4_destroy_sock(struct sock *sk)
2169 {
2170 	struct tcp_sock *tp = tcp_sk(sk);
2171 
2172 	tcp_clear_xmit_timers(sk);
2173 
2174 	tcp_cleanup_congestion_control(sk);
2175 
2176 	/* Clean up the write buffer. */
2177 	tcp_write_queue_purge(sk);
2178 
2179 	/* Cleans up our, hopefully empty, out_of_order_queue. */
2180 	__skb_queue_purge(&tp->out_of_order_queue);
2181 
2182 #ifdef CONFIG_TCP_MD5SIG
2183 	/* Clean up the MD5 key list, if any */
2184 	if (tp->md5sig_info) {
2185 		tcp_clear_md5_list(sk);
2186 		kfree_rcu(tp->md5sig_info, rcu);
2187 		tp->md5sig_info = NULL;
2188 	}
2189 #endif
2190 
2191 #ifdef CONFIG_NET_DMA
2192 	/* Cleans up our sk_async_wait_queue */
2193 	__skb_queue_purge(&sk->sk_async_wait_queue);
2194 #endif
2195 
2196 	/* Clean the prequeue; it really should be empty by now */
2197 	__skb_queue_purge(&tp->ucopy.prequeue);
2198 
2199 	/* Clean up a referenced TCP bind bucket. */
2200 	if (inet_csk(sk)->icsk_bind_hash)
2201 		inet_put_port(sk);
2202 
2203 	/* TCP Cookie Transactions */
2204 	if (tp->cookie_values != NULL) {
2205 		kref_put(&tp->cookie_values->kref,
2206 			 tcp_cookie_values_release);
2207 		tp->cookie_values = NULL;
2208 	}
2209 	BUG_ON(tp->fastopen_rsk != NULL);
2210 
2211 	/* If the socket was aborted during a connect operation */
2212 	tcp_free_fastopen_req(tp);
2213 
2214 	sk_sockets_allocated_dec(sk);
2215 	sock_release_memcg(sk);
2216 }
2217 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2218 
2219 #ifdef CONFIG_PROC_FS
2220 /* Proc filesystem TCP sock list dumping. */
2221 
2222 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2223 {
2224 	return hlist_nulls_empty(head) ? NULL :
2225 		list_entry(head->first, struct inet_timewait_sock, tw_node);
2226 }
2227 
2228 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2229 {
2230 	return !is_a_nulls(tw->tw_node.next) ?
2231 		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
2232 }
2233 
2234 /*
2235  * Get the next listening socket following cur.  If cur is NULL, get the
2236  * first socket, starting from the bucket given in st->bucket; when
2237  * st->bucket is zero the very first socket in the hash table is returned.
2238  */
2239 static void *listening_get_next(struct seq_file *seq, void *cur)
2240 {
2241 	struct inet_connection_sock *icsk;
2242 	struct hlist_nulls_node *node;
2243 	struct sock *sk = cur;
2244 	struct inet_listen_hashbucket *ilb;
2245 	struct tcp_iter_state *st = seq->private;
2246 	struct net *net = seq_file_net(seq);
2247 
2248 	if (!sk) {
2249 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2250 		spin_lock_bh(&ilb->lock);
2251 		sk = sk_nulls_head(&ilb->head);
2252 		st->offset = 0;
2253 		goto get_sk;
2254 	}
2255 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2256 	++st->num;
2257 	++st->offset;
2258 
2259 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2260 		struct request_sock *req = cur;
2261 
2262 		icsk = inet_csk(st->syn_wait_sk);
2263 		req = req->dl_next;
2264 		while (1) {
2265 			while (req) {
2266 				if (req->rsk_ops->family == st->family) {
2267 					cur = req;
2268 					goto out;
2269 				}
2270 				req = req->dl_next;
2271 			}
2272 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2273 				break;
2274 get_req:
2275 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2276 		}
2277 		sk	  = sk_nulls_next(st->syn_wait_sk);
2278 		st->state = TCP_SEQ_STATE_LISTENING;
2279 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2280 	} else {
2281 		icsk = inet_csk(sk);
2282 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2283 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2284 			goto start_req;
2285 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2286 		sk = sk_nulls_next(sk);
2287 	}
2288 get_sk:
2289 	sk_nulls_for_each_from(sk, node) {
2290 		if (!net_eq(sock_net(sk), net))
2291 			continue;
2292 		if (sk->sk_family == st->family) {
2293 			cur = sk;
2294 			goto out;
2295 		}
2296 		icsk = inet_csk(sk);
2297 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2298 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2299 start_req:
2300 			st->uid		= sock_i_uid(sk);
2301 			st->syn_wait_sk = sk;
2302 			st->state	= TCP_SEQ_STATE_OPENREQ;
2303 			st->sbucket	= 0;
2304 			goto get_req;
2305 		}
2306 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2307 	}
2308 	spin_unlock_bh(&ilb->lock);
2309 	st->offset = 0;
2310 	if (++st->bucket < INET_LHTABLE_SIZE) {
2311 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2312 		spin_lock_bh(&ilb->lock);
2313 		sk = sk_nulls_head(&ilb->head);
2314 		goto get_sk;
2315 	}
2316 	cur = NULL;
2317 out:
2318 	return cur;
2319 }
2320 
2321 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2322 {
2323 	struct tcp_iter_state *st = seq->private;
2324 	void *rc;
2325 
2326 	st->bucket = 0;
2327 	st->offset = 0;
2328 	rc = listening_get_next(seq, NULL);
2329 
2330 	while (rc && *pos) {
2331 		rc = listening_get_next(seq, rc);
2332 		--*pos;
2333 	}
2334 	return rc;
2335 }
2336 
2337 static inline bool empty_bucket(struct tcp_iter_state *st)
2338 {
2339 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2340 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2341 }
2342 
2343 /*
2344  * Get first established socket starting from bucket given in st->bucket.
2345  * If st->bucket is zero, the very first socket in the hash is returned.
2346  */
2347 static void *established_get_first(struct seq_file *seq)
2348 {
2349 	struct tcp_iter_state *st = seq->private;
2350 	struct net *net = seq_file_net(seq);
2351 	void *rc = NULL;
2352 
2353 	st->offset = 0;
2354 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2355 		struct sock *sk;
2356 		struct hlist_nulls_node *node;
2357 		struct inet_timewait_sock *tw;
2358 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2359 
2360 		/* Lockless fast path for the common case of empty buckets */
2361 		if (empty_bucket(st))
2362 			continue;
2363 
2364 		spin_lock_bh(lock);
2365 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2366 			if (sk->sk_family != st->family ||
2367 			    !net_eq(sock_net(sk), net)) {
2368 				continue;
2369 			}
2370 			rc = sk;
2371 			goto out;
2372 		}
2373 		st->state = TCP_SEQ_STATE_TIME_WAIT;
2374 		inet_twsk_for_each(tw, node,
2375 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
2376 			if (tw->tw_family != st->family ||
2377 			    !net_eq(twsk_net(tw), net)) {
2378 				continue;
2379 			}
2380 			rc = tw;
2381 			goto out;
2382 		}
2383 		spin_unlock_bh(lock);
2384 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2385 	}
2386 out:
2387 	return rc;
2388 }
2389 
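/* Annotation: advance the established-hash iterator.  Walk the remainder of
 * the current chain, then the TIME_WAIT chain of the same bucket, and only
 * then move on to the next non-empty bucket, dropping and re-taking the
 * per-bucket lock as buckets change.
 */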
2390 static void *established_get_next(struct seq_file *seq, void *cur)
2391 {
2392 	struct sock *sk = cur;
2393 	struct inet_timewait_sock *tw;
2394 	struct hlist_nulls_node *node;
2395 	struct tcp_iter_state *st = seq->private;
2396 	struct net *net = seq_file_net(seq);
2397 
2398 	++st->num;
2399 	++st->offset;
2400 
2401 	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2402 		tw = cur;
2403 		tw = tw_next(tw);
2404 get_tw:
2405 		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2406 			tw = tw_next(tw);
2407 		}
2408 		if (tw) {
2409 			cur = tw;
2410 			goto out;
2411 		}
2412 		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2413 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2414 
2415 		/* Look for the next non-empty bucket */
2416 		st->offset = 0;
2417 		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2418 				empty_bucket(st))
2419 			;
2420 		if (st->bucket > tcp_hashinfo.ehash_mask)
2421 			return NULL;
2422 
2423 		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2424 		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2425 	} else
2426 		sk = sk_nulls_next(sk);
2427 
2428 	sk_nulls_for_each_from(sk, node) {
2429 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2430 			goto found;
2431 	}
2432 
2433 	st->state = TCP_SEQ_STATE_TIME_WAIT;
2434 	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2435 	goto get_tw;
2436 found:
2437 	cur = sk;
2438 out:
2439 	return cur;
2440 }
2441 
2442 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2443 {
2444 	struct tcp_iter_state *st = seq->private;
2445 	void *rc;
2446 
2447 	st->bucket = 0;
2448 	rc = established_get_first(seq);
2449 
2450 	while (rc && pos) {
2451 		rc = established_get_next(seq, rc);
2452 		--pos;
2453 	}
2454 	return rc;
2455 }
2456 
2457 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2458 {
2459 	void *rc;
2460 	struct tcp_iter_state *st = seq->private;
2461 
2462 	st->state = TCP_SEQ_STATE_LISTENING;
2463 	rc	  = listening_get_idx(seq, &pos);
2464 
2465 	if (!rc) {
2466 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2467 		rc	  = established_get_idx(seq, pos);
2468 	}
2469 
2470 	return rc;
2471 }
2472 
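/* Annotation: try to resume iteration at the position recorded by the
 * previous read (st->bucket/st->offset) instead of rescanning from the
 * start of the hash tables; st->num is restored afterwards so the sequence
 * numbering seen by userspace stays continuous across reads.
 */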
2473 static void *tcp_seek_last_pos(struct seq_file *seq)
2474 {
2475 	struct tcp_iter_state *st = seq->private;
2476 	int offset = st->offset;
2477 	int orig_num = st->num;
2478 	void *rc = NULL;
2479 
2480 	switch (st->state) {
2481 	case TCP_SEQ_STATE_OPENREQ:
2482 	case TCP_SEQ_STATE_LISTENING:
2483 		if (st->bucket >= INET_LHTABLE_SIZE)
2484 			break;
2485 		st->state = TCP_SEQ_STATE_LISTENING;
2486 		rc = listening_get_next(seq, NULL);
2487 		while (offset-- && rc)
2488 			rc = listening_get_next(seq, rc);
2489 		if (rc)
2490 			break;
2491 		st->bucket = 0;
2492 		/* Fallthrough */
2493 	case TCP_SEQ_STATE_ESTABLISHED:
2494 	case TCP_SEQ_STATE_TIME_WAIT:
2495 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2496 		if (st->bucket > tcp_hashinfo.ehash_mask)
2497 			break;
2498 		rc = established_get_first(seq);
2499 		while (offset-- && rc)
2500 			rc = established_get_next(seq, rc);
2501 	}
2502 
2503 	st->num = orig_num;
2504 
2505 	return rc;
2506 }
2507 
2508 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2509 {
2510 	struct tcp_iter_state *st = seq->private;
2511 	void *rc;
2512 
2513 	if (*pos && *pos == st->last_pos) {
2514 		rc = tcp_seek_last_pos(seq);
2515 		if (rc)
2516 			goto out;
2517 	}
2518 
2519 	st->state = TCP_SEQ_STATE_LISTENING;
2520 	st->num = 0;
2521 	st->bucket = 0;
2522 	st->offset = 0;
2523 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2524 
2525 out:
2526 	st->last_pos = *pos;
2527 	return rc;
2528 }
2529 
2530 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2531 {
2532 	struct tcp_iter_state *st = seq->private;
2533 	void *rc = NULL;
2534 
2535 	if (v == SEQ_START_TOKEN) {
2536 		rc = tcp_get_idx(seq, 0);
2537 		goto out;
2538 	}
2539 
2540 	switch (st->state) {
2541 	case TCP_SEQ_STATE_OPENREQ:
2542 	case TCP_SEQ_STATE_LISTENING:
2543 		rc = listening_get_next(seq, v);
2544 		if (!rc) {
2545 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2546 			st->bucket = 0;
2547 			st->offset = 0;
2548 			rc	  = established_get_first(seq);
2549 		}
2550 		break;
2551 	case TCP_SEQ_STATE_ESTABLISHED:
2552 	case TCP_SEQ_STATE_TIME_WAIT:
2553 		rc = established_get_next(seq, v);
2554 		break;
2555 	}
2556 out:
2557 	++*pos;
2558 	st->last_pos = *pos;
2559 	return rc;
2560 }
2561 
2562 static void tcp_seq_stop(struct seq_file *seq, void *v)
2563 {
2564 	struct tcp_iter_state *st = seq->private;
2565 
2566 	switch (st->state) {
2567 	case TCP_SEQ_STATE_OPENREQ:
2568 		if (v) {
2569 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2570 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2571 		}
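		/* Fall through */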
2572 	case TCP_SEQ_STATE_LISTENING:
2573 		if (v != SEQ_START_TOKEN)
2574 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2575 		break;
2576 	case TCP_SEQ_STATE_TIME_WAIT:
2577 	case TCP_SEQ_STATE_ESTABLISHED:
2578 		if (v)
2579 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2580 		break;
2581 	}
2582 }
2583 
2584 int tcp_seq_open(struct inode *inode, struct file *file)
2585 {
2586 	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2587 	struct tcp_iter_state *s;
2588 	int err;
2589 
2590 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2591 			  sizeof(struct tcp_iter_state));
2592 	if (err < 0)
2593 		return err;
2594 
2595 	s = ((struct seq_file *)file->private_data)->private;
2596 	s->family		= afinfo->family;
2597 	s->last_pos 		= 0;
2598 	return 0;
2599 }
2600 EXPORT_SYMBOL(tcp_seq_open);
2601 
2602 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2603 {
2604 	int rc = 0;
2605 	struct proc_dir_entry *p;
2606 
2607 	afinfo->seq_ops.start		= tcp_seq_start;
2608 	afinfo->seq_ops.next		= tcp_seq_next;
2609 	afinfo->seq_ops.stop		= tcp_seq_stop;
2610 
2611 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2612 			     afinfo->seq_fops, afinfo);
2613 	if (!p)
2614 		rc = -ENOMEM;
2615 	return rc;
2616 }
2617 EXPORT_SYMBOL(tcp_proc_register);
2618 
2619 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2620 {
2621 	proc_net_remove(net, afinfo->name);
2622 }
2623 EXPORT_SYMBOL(tcp_proc_unregister);
2624 
2625 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2626 			 struct seq_file *f, int i, kuid_t uid, int *len)
2627 {
2628 	const struct inet_request_sock *ireq = inet_rsk(req);
2629 	long delta = req->expires - jiffies;
2630 
2631 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2632 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2633 		i,
2634 		ireq->loc_addr,
2635 		ntohs(inet_sk(sk)->inet_sport),
2636 		ireq->rmt_addr,
2637 		ntohs(ireq->rmt_port),
2638 		TCP_SYN_RECV,
2639 		0, 0, /* could print option size, but that is af dependent. */
2640 		1,    /* timers active (only the expire timer) */
2641 		jiffies_delta_to_clock_t(delta),
2642 		req->retrans,
2643 		from_kuid_munged(seq_user_ns(f), uid),
2644 		0,  /* non standard timer */
2645 		0, /* open_requests have no inode */
2646 		atomic_read(&sk->sk_refcnt),
2647 		req,
2648 		len);
2649 }
2650 
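/* Annotation: format one /proc/net/tcp line for a full (listening or
 * established) socket.  Addresses and ports are printed in hex; the address
 * is the raw network-byte-order word, so 127.0.0.1 typically appears as
 * 0100007F on little-endian machines.  A line looks roughly like this
 * (illustrative values only, not taken from a real system):
 *
 *  0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ffff88003abc0000 100 0 0 10 0
 */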
2651 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2652 {
2653 	int timer_active;
2654 	unsigned long timer_expires;
2655 	const struct tcp_sock *tp = tcp_sk(sk);
2656 	const struct inet_connection_sock *icsk = inet_csk(sk);
2657 	const struct inet_sock *inet = inet_sk(sk);
2658 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2659 	__be32 dest = inet->inet_daddr;
2660 	__be32 src = inet->inet_rcv_saddr;
2661 	__u16 destp = ntohs(inet->inet_dport);
2662 	__u16 srcp = ntohs(inet->inet_sport);
2663 	int rx_queue;
2664 
2665 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2666 		timer_active	= 1;
2667 		timer_expires	= icsk->icsk_timeout;
2668 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2669 		timer_active	= 4;
2670 		timer_expires	= icsk->icsk_timeout;
2671 	} else if (timer_pending(&sk->sk_timer)) {
2672 		timer_active	= 2;
2673 		timer_expires	= sk->sk_timer.expires;
2674 	} else {
2675 		timer_active	= 0;
2676 		timer_expires = jiffies;
2677 	}
2678 
2679 	if (sk->sk_state == TCP_LISTEN)
2680 		rx_queue = sk->sk_ack_backlog;
2681 	else
2682 		/*
2683 		 * Because we don't lock the socket, we might find a transient negative value.
2684 		 */
2685 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2686 
2687 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2688 			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2689 		i, src, srcp, dest, destp, sk->sk_state,
2690 		tp->write_seq - tp->snd_una,
2691 		rx_queue,
2692 		timer_active,
2693 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2694 		icsk->icsk_retransmits,
2695 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2696 		icsk->icsk_probes_out,
2697 		sock_i_ino(sk),
2698 		atomic_read(&sk->sk_refcnt), sk,
2699 		jiffies_to_clock_t(icsk->icsk_rto),
2700 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2701 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2702 		tp->snd_cwnd,
2703 		sk->sk_state == TCP_LISTEN ?
2704 		    (fastopenq ? fastopenq->max_qlen : 0) :
2705 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
2706 		len);
2707 }
2708 
2709 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2710 			       struct seq_file *f, int i, int *len)
2711 {
2712 	__be32 dest, src;
2713 	__u16 destp, srcp;
2714 	long delta = tw->tw_ttd - jiffies;
2715 
2716 	dest  = tw->tw_daddr;
2717 	src   = tw->tw_rcv_saddr;
2718 	destp = ntohs(tw->tw_dport);
2719 	srcp  = ntohs(tw->tw_sport);
2720 
2721 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2722 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2723 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2724 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2725 		atomic_read(&tw->tw_refcnt), tw, len);
2726 }
2727 
2728 #define TMPSZ 150
2729 
2730 static int tcp4_seq_show(struct seq_file *seq, void *v)
2731 {
2732 	struct tcp_iter_state *st;
2733 	int len;
2734 
2735 	if (v == SEQ_START_TOKEN) {
2736 		seq_printf(seq, "%-*s\n", TMPSZ - 1,
2737 			   "  sl  local_address rem_address   st tx_queue "
2738 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2739 			   "inode");
2740 		goto out;
2741 	}
2742 	st = seq->private;
2743 
2744 	switch (st->state) {
2745 	case TCP_SEQ_STATE_LISTENING:
2746 	case TCP_SEQ_STATE_ESTABLISHED:
2747 		get_tcp4_sock(v, seq, st->num, &len);
2748 		break;
2749 	case TCP_SEQ_STATE_OPENREQ:
2750 		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2751 		break;
2752 	case TCP_SEQ_STATE_TIME_WAIT:
2753 		get_timewait4_sock(v, seq, st->num, &len);
2754 		break;
2755 	}
2756 	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2757 out:
2758 	return 0;
2759 }
2760 
2761 static const struct file_operations tcp_afinfo_seq_fops = {
2762 	.owner   = THIS_MODULE,
2763 	.open    = tcp_seq_open,
2764 	.read    = seq_read,
2765 	.llseek  = seq_lseek,
2766 	.release = seq_release_net
2767 };
2768 
2769 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2770 	.name		= "tcp",
2771 	.family		= AF_INET,
2772 	.seq_fops	= &tcp_afinfo_seq_fops,
2773 	.seq_ops	= {
2774 		.show		= tcp4_seq_show,
2775 	},
2776 };
2777 
2778 static int __net_init tcp4_proc_init_net(struct net *net)
2779 {
2780 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2781 }
2782 
2783 static void __net_exit tcp4_proc_exit_net(struct net *net)
2784 {
2785 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2786 }
2787 
2788 static struct pernet_operations tcp4_net_ops = {
2789 	.init = tcp4_proc_init_net,
2790 	.exit = tcp4_proc_exit_net,
2791 };
2792 
2793 int __init tcp4_proc_init(void)
2794 {
2795 	return register_pernet_subsys(&tcp4_net_ops);
2796 }
2797 
2798 void tcp4_proc_exit(void)
2799 {
2800 	unregister_pernet_subsys(&tcp4_net_ops);
2801 }
2802 #endif /* CONFIG_PROC_FS */
2803 
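/* Annotation: GRO receive hook for IPv4 TCP.  Verify the checksum first
 * (accept a valid CHECKSUM_COMPLETE value, or compute it by hand for
 * CHECKSUM_NONE) and flush the segment on failure; only then hand it to the
 * protocol-independent tcp_gro_receive() for coalescing.
 */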
2804 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2805 {
2806 	const struct iphdr *iph = skb_gro_network_header(skb);
2807 	__wsum wsum;
2808 	__sum16 sum;
2809 
2810 	switch (skb->ip_summed) {
2811 	case CHECKSUM_COMPLETE:
2812 		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2813 				  skb->csum)) {
2814 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2815 			break;
2816 		}
2817 flush:
2818 		NAPI_GRO_CB(skb)->flush = 1;
2819 		return NULL;
2820 
2821 	case CHECKSUM_NONE:
2822 		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2823 					  skb_gro_len(skb), IPPROTO_TCP, 0);
2824 		sum = csum_fold(skb_checksum(skb,
2825 					     skb_gro_offset(skb),
2826 					     skb_gro_len(skb),
2827 					     wsum));
2828 		if (sum)
2829 			goto flush;
2830 
2831 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2832 		break;
2833 	}
2834 
2835 	return tcp_gro_receive(head, skb);
2836 }
2837 
2838 int tcp4_gro_complete(struct sk_buff *skb)
2839 {
2840 	const struct iphdr *iph = ip_hdr(skb);
2841 	struct tcphdr *th = tcp_hdr(skb);
2842 
2843 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2844 				  iph->saddr, iph->daddr, 0);
2845 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2846 
2847 	return tcp_gro_complete(skb);
2848 }
2849 
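/* Annotation: the IPv4 TCP protocol operations.  This table wires the
 * functions defined in this file, together with the generic tcp_* helpers,
 * into the AF_INET socket layer.
 */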
2850 struct proto tcp_prot = {
2851 	.name			= "TCP",
2852 	.owner			= THIS_MODULE,
2853 	.close			= tcp_close,
2854 	.connect		= tcp_v4_connect,
2855 	.disconnect		= tcp_disconnect,
2856 	.accept			= inet_csk_accept,
2857 	.ioctl			= tcp_ioctl,
2858 	.init			= tcp_v4_init_sock,
2859 	.destroy		= tcp_v4_destroy_sock,
2860 	.shutdown		= tcp_shutdown,
2861 	.setsockopt		= tcp_setsockopt,
2862 	.getsockopt		= tcp_getsockopt,
2863 	.recvmsg		= tcp_recvmsg,
2864 	.sendmsg		= tcp_sendmsg,
2865 	.sendpage		= tcp_sendpage,
2866 	.backlog_rcv		= tcp_v4_do_rcv,
2867 	.release_cb		= tcp_release_cb,
2868 	.mtu_reduced		= tcp_v4_mtu_reduced,
2869 	.hash			= inet_hash,
2870 	.unhash			= inet_unhash,
2871 	.get_port		= inet_csk_get_port,
2872 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2873 	.sockets_allocated	= &tcp_sockets_allocated,
2874 	.orphan_count		= &tcp_orphan_count,
2875 	.memory_allocated	= &tcp_memory_allocated,
2876 	.memory_pressure	= &tcp_memory_pressure,
2877 	.sysctl_wmem		= sysctl_tcp_wmem,
2878 	.sysctl_rmem		= sysctl_tcp_rmem,
2879 	.max_header		= MAX_TCP_HEADER,
2880 	.obj_size		= sizeof(struct tcp_sock),
2881 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2882 	.twsk_prot		= &tcp_timewait_sock_ops,
2883 	.rsk_prot		= &tcp_request_sock_ops,
2884 	.h.hashinfo		= &tcp_hashinfo,
2885 	.no_autobind		= true,
2886 #ifdef CONFIG_COMPAT
2887 	.compat_setsockopt	= compat_tcp_setsockopt,
2888 	.compat_getsockopt	= compat_tcp_getsockopt,
2889 #endif
2890 #ifdef CONFIG_MEMCG_KMEM
2891 	.init_cgroup		= tcp_init_cgroup,
2892 	.destroy_cgroup		= tcp_destroy_cgroup,
2893 	.proto_cgroup		= tcp_proto_cgroup,
2894 #endif
2895 };
2896 EXPORT_SYMBOL(tcp_prot);
2897 
2898 static int __net_init tcp_sk_init(struct net *net)
2899 {
2900 	return 0;
2901 }
2902 
2903 static void __net_exit tcp_sk_exit(struct net *net)
2904 {
2905 }
2906 
2907 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2908 {
2909 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2910 }
2911 
2912 static struct pernet_operations __net_initdata tcp_sk_ops = {
2913        .init	   = tcp_sk_init,
2914        .exit	   = tcp_sk_exit,
2915        .exit_batch = tcp_sk_exit_batch,
2916 };
2917 
2918 void __init tcp_v4_init(void)
2919 {
2920 	inet_hashinfo_init(&tcp_hashinfo);
2921 	if (register_pernet_subsys(&tcp_sk_ops))
2922 		panic("Failed to create the TCP control socket.\n");
2923 }
2924