xref: /linux/net/ipv4/tcp_ipv4.c (revision 2a10154abcb75ad0d7b6bfea6210ac743ec60897)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
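
/* Both knobs above are runtime tunables; assuming the usual net.ipv4 sysctl
 * naming they show up as /proc/sys/net/ipv4/tcp_tw_reuse and
 * /proc/sys/net/ipv4/tcp_low_latency (sysctl_net_ipv4.c holds the
 * authoritative table). tcp_tw_reuse gates the TIME-WAIT reuse check in
 * tcp_twsk_unique() below, while tcp_low_latency makes tcp_prequeue() bail
 * out early.
 */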
91 
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96 
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99 
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 					  ip_hdr(skb)->saddr,
104 					  tcp_hdr(skb)->dest,
105 					  tcp_hdr(skb)->source);
106 }
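
/* For reference (assumed from RFC 6528, not from this file):
 * secure_tcp_sequence_number(), implemented in net/core/secure_seq.c, is
 * expected to compute something of the form
 *
 *	ISN = M + F(saddr, daddr, sport, dport, secret)
 *
 * where M is a fine-grained clock (about one tick every 4 microseconds) and F
 * is a keyed hash, keeping ISNs monotonic per four-tuple yet hard to predict
 * off-path.
 */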
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 	struct tcp_sock *tp = tcp_sk(sk);
112 
113 	/* With PAWS, it is safe from the viewpoint
114 	   of data integrity. Even without PAWS it is safe provided sequence
115 	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
116 
117 	   Actually, the idea is close to VJ's, only the timestamp cache is
118 	   held not per host but per port pair, and the TW bucket is used as
119 	   the state holder.
120 
121 	   If the TW bucket has already been destroyed, we fall back to VJ's
122 	   scheme and use the initial timestamp retrieved from the peer table.
123 	 */
124 	if (tcptw->tw_ts_recent_stamp &&
125 	    (!twp || (sysctl_tcp_tw_reuse &&
126 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 		if (tp->write_seq == 0)
129 			tp->write_seq = 1;
130 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
131 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 		sock_hold(sktw);
133 		return 1;
134 	}
135 
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
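
/* A worked example of the sequence bump above (illustrative numbers only):
 * if the old TIME-WAIT socket left off at tw_snd_nxt == 1000, the reused
 * connection starts at write_seq == 1000 + 65535 + 2 == 66537, i.e. just past
 * the largest unscaled window, the extra 2 presumably covering the SYN and
 * FIN sequence slots, so new segments cannot be mistaken for stragglers from
 * the previous incarnation.
 */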
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 	struct inet_sock *inet = inet_sk(sk);
145 	struct tcp_sock *tp = tcp_sk(sk);
146 	__be16 orig_sport, orig_dport;
147 	__be32 daddr, nexthop;
148 	struct flowi4 *fl4;
149 	struct rtable *rt;
150 	int err;
151 	struct ip_options_rcu *inet_opt;
152 
153 	if (addr_len < sizeof(struct sockaddr_in))
154 		return -EINVAL;
155 
156 	if (usin->sin_family != AF_INET)
157 		return -EAFNOSUPPORT;
158 
159 	nexthop = daddr = usin->sin_addr.s_addr;
160 	inet_opt = rcu_dereference_protected(inet->inet_opt,
161 					     sock_owned_by_user(sk));
162 	if (inet_opt && inet_opt->opt.srr) {
163 		if (!daddr)
164 			return -EINVAL;
165 		nexthop = inet_opt->opt.faddr;
166 	}
167 
168 	orig_sport = inet->inet_sport;
169 	orig_dport = usin->sin_port;
170 	fl4 = &inet->cork.fl.u.ip4;
171 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 			      IPPROTO_TCP,
174 			      orig_sport, orig_dport, sk);
175 	if (IS_ERR(rt)) {
176 		err = PTR_ERR(rt);
177 		if (err == -ENETUNREACH)
178 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 		return err;
180 	}
181 
182 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 		ip_rt_put(rt);
184 		return -ENETUNREACH;
185 	}
186 
187 	if (!inet_opt || !inet_opt->opt.srr)
188 		daddr = fl4->daddr;
189 
190 	if (!inet->inet_saddr)
191 		inet->inet_saddr = fl4->saddr;
192 	sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 		/* Reset inherited state */
196 		tp->rx_opt.ts_recent	   = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		if (likely(!tp->repair))
199 			tp->write_seq	   = 0;
200 	}
201 
202 	if (tcp_death_row.sysctl_tw_recycle &&
203 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 		tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206 	inet->inet_dport = usin->sin_port;
207 	sk_daddr_set(sk, daddr);
208 
209 	inet_csk(sk)->icsk_ext_hdr_len = 0;
210 	if (inet_opt)
211 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215 	/* Socket identity is still unknown (sport may be zero).
216 	 * However we set the state to SYN-SENT and, without releasing the
217 	 * socket lock, select a source port, enter ourselves into the hash
218 	 * tables and complete initialization after this.
219 	 */
220 	tcp_set_state(sk, TCP_SYN_SENT);
221 	err = inet_hash_connect(&tcp_death_row, sk);
222 	if (err)
223 		goto failure;
224 
225 	inet_set_txhash(sk);
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
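
/* For orientation, a minimal userspace sketch that ends up in
 * tcp_v4_connect() (via sys_connect() and inet_stream_connect()); the name
 * demo_connect() and the documentation address are illustrative, nothing
 * below is part of this file, and error handling is elided:
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int demo_connect(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port   = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *		close(fd);
 *		return 0;
 *	}
 */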
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to go wrong... Remember the soft error
283 	 * in case this connection is not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307 	struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309 	if (dst)
310 		dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 	struct request_sock *req = inet_reqsk(sk);
318 	struct net *net = sock_net(sk);
319 
320 	/* ICMPs are not backlogged, hence we cannot get
321 	 * an established socket here.
322 	 */
323 	WARN_ON(req->sk);
324 
325 	if (seq != tcp_rsk(req)->snt_isn) {
326 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 		reqsk_put(req);
328 	} else {
329 		/*
330 		 * Still in SYN_RECV, just remove it silently.
331 		 * There is no good way to pass the error to the newly
332 		 * created socket, and POSIX does not want network
333 		 * errors returned from accept().
334 		 */
335 		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337 	}
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 	struct inet_connection_sock *icsk;
362 	struct tcp_sock *tp;
363 	struct inet_sock *inet;
364 	const int type = icmp_hdr(icmp_skb)->type;
365 	const int code = icmp_hdr(icmp_skb)->code;
366 	struct sock *sk;
367 	struct sk_buff *skb;
368 	struct request_sock *fastopen;
369 	__u32 seq, snd_una;
370 	__u32 remaining;
371 	int err;
372 	struct net *net = dev_net(icmp_skb->dev);
373 
374 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 				       th->dest, iph->saddr, ntohs(th->source),
376 				       inet_iif(icmp_skb));
377 	if (!sk) {
378 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 		return;
380 	}
381 	if (sk->sk_state == TCP_TIME_WAIT) {
382 		inet_twsk_put(inet_twsk(sk));
383 		return;
384 	}
385 	seq = ntohl(th->seq);
386 	if (sk->sk_state == TCP_NEW_SYN_RECV)
387 		return tcp_req_err(sk, seq);
388 
389 	bh_lock_sock(sk);
390 	/* If too many ICMPs get dropped on busy
391 	 * servers this needs to be solved differently.
392 	 * We do take care of PMTU discovery (RFC1191) special case :
393 	 * we can receive locally generated ICMP messages while socket is held.
394 	 */
395 	if (sock_owned_by_user(sk)) {
396 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 	}
399 	if (sk->sk_state == TCP_CLOSE)
400 		goto out;
401 
402 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 		goto out;
405 	}
406 
407 	icsk = inet_csk(sk);
408 	tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
410 	fastopen = tp->fastopen_rsk;
411 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 	if (sk->sk_state != TCP_LISTEN &&
413 	    !between(seq, snd_una, tp->snd_nxt)) {
414 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 		goto out;
416 	}
417 
418 	switch (type) {
419 	case ICMP_REDIRECT:
420 		do_redirect(icmp_skb, sk);
421 		goto out;
422 	case ICMP_SOURCE_QUENCH:
423 		/* Just silently ignore these. */
424 		goto out;
425 	case ICMP_PARAMETERPROB:
426 		err = EPROTO;
427 		break;
428 	case ICMP_DEST_UNREACH:
429 		if (code > NR_ICMP_UNREACH)
430 			goto out;
431 
432 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 			/* We are not interested in TCP_LISTEN and open_requests
434 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435 			 * they should go through unfragmented).
436 			 */
437 			if (sk->sk_state == TCP_LISTEN)
438 				goto out;
439 
440 			tp->mtu_info = info;
441 			if (!sock_owned_by_user(sk)) {
442 				tcp_v4_mtu_reduced(sk);
443 			} else {
444 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 					sock_hold(sk);
446 			}
447 			goto out;
448 		}
449 
450 		err = icmp_err_convert[code].errno;
451 		/* check if icmp_skb allows revert of backoff
452 		 * (see draft-zimmermann-tcp-lcd) */
453 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 			break;
455 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456 		    !icsk->icsk_backoff || fastopen)
457 			break;
458 
459 		if (sock_owned_by_user(sk))
460 			break;
461 
462 		icsk->icsk_backoff--;
463 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 					       TCP_TIMEOUT_INIT;
465 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467 		skb = tcp_write_queue_head(sk);
468 		BUG_ON(!skb);
469 
470 		remaining = icsk->icsk_rto -
471 			    min(icsk->icsk_rto,
472 				tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474 		if (remaining) {
475 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 						  remaining, TCP_RTO_MAX);
477 		} else {
478 			/* The reverted RTO has already expired;
479 			 * retransmit now. */
480 			tcp_retransmit_timer(sk);
481 		}
482 
483 		break;
484 	case ICMP_TIME_EXCEEDED:
485 		err = EHOSTUNREACH;
486 		break;
487 	default:
488 		goto out;
489 	}
490 
491 	switch (sk->sk_state) {
492 	case TCP_SYN_SENT:
493 	case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted it is treated as a connected one below.
496 		 */
497 		if (fastopen && !fastopen->sk)
498 			break;
499 
500 		if (!sock_owned_by_user(sk)) {
501 			sk->sk_err = err;
502 
503 			sk->sk_error_report(sk);
504 
505 			tcp_done(sk);
506 		} else {
507 			sk->sk_err_soft = err;
508 		}
509 		goto out;
510 	}
511 
512 	/* If we've already connected we will keep trying
513 	 * until we time out, or the user gives up.
514 	 *
515 	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
516 	 * to be treated as hard errors (well, FRAG_FAILED too,
517 	 * but it is obsoleted by PMTU discovery).
518 	 *
519 	 * Note that in the modern internet, where routing is unreliable
520 	 * and broken firewalls sit in every dark corner sending random
521 	 * errors ordered by their masters, even these two messages finally
522 	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
523 	 *
524 	 * Now we are in compliance with the RFCs.
525 	 *							--ANK (980905)
526 	 */
527 
528 	inet = inet_sk(sk);
529 	if (!sock_owned_by_user(sk) && inet->recverr) {
530 		sk->sk_err = err;
531 		sk->sk_error_report(sk);
532 	} else	{ /* Only an error on timeout */
533 		sk->sk_err_soft = err;
534 	}
535 
536 out:
537 	bh_unlock_sock(sk);
538 	sock_put(sk);
539 }
540 
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 	struct tcphdr *th = tcp_hdr(skb);
544 
545 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 		skb->csum_start = skb_transport_header(skb) - skb->head;
548 		skb->csum_offset = offsetof(struct tcphdr, check);
549 	} else {
550 		th->check = tcp_v4_check(skb->len, saddr, daddr,
551 					 csum_partial(th,
552 						      th->doff << 2,
553 						      skb->csum));
554 	}
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 	const struct inet_sock *inet = inet_sk(sk);
561 
562 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
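
/* tcp_v4_check() folds in the classic RFC 793 pseudo-header (layout quoted
 * from the RFC, not from this file):
 *
 *	source address (4) | destination address (4) | zero (1) |
 *	protocol = IPPROTO_TCP (1) | TCP length (2)
 *
 * In the CHECKSUM_PARTIAL branch only this pseudo-header sum is stored and
 * the NIC (or skb_checksum_help()) finishes the sum over the TCP header and
 * payload; in the software branch csum_partial() covers the header, with
 * skb->csum assumed to already hold the payload checksum.
 */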
565 
566 /*
567  *	This routine will send an RST to the other TCP.
568  *
569  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *		      for the reset?
571  *	Answer: if a packet caused an RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's TCP.
574  *		So we build the reply based only on the parameters that
575  *		arrived with the segment.
576  *	Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581 	const struct tcphdr *th = tcp_hdr(skb);
582 	struct {
583 		struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 	} rep;
588 	struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 	struct tcp_md5sig_key *key;
591 	const __u8 *hash_location = NULL;
592 	unsigned char newhash[16];
593 	int genhash;
594 	struct sock *sk1 = NULL;
595 #endif
596 	struct net *net;
597 
598 	/* Never send a reset in response to a reset. */
599 	if (th->rst)
600 		return;
601 
602 	/* If sk is not NULL, it means we did a successful lookup and the
603 	 * incoming route had to be correct. prequeue might have dropped our dst.
604 	 */
605 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 		return;
607 
608 	/* Swap the send and the receive. */
609 	memset(&rep, 0, sizeof(rep));
610 	rep.th.dest   = th->source;
611 	rep.th.source = th->dest;
612 	rep.th.doff   = sizeof(struct tcphdr) / 4;
613 	rep.th.rst    = 1;
614 
615 	if (th->ack) {
616 		rep.th.seq = th->ack_seq;
617 	} else {
618 		rep.th.ack = 1;
619 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 				       skb->len - (th->doff << 2));
621 	}
622 
623 	memset(&arg, 0, sizeof(arg));
624 	arg.iov[0].iov_base = (unsigned char *)&rep;
625 	arg.iov[0].iov_len  = sizeof(rep.th);
626 
627 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 	hash_location = tcp_parse_md5sig_option(th);
630 	if (!sk && hash_location) {
631 		/*
632 		 * The active side is lost. Try to find the listening socket via
633 		 * the source port, and then find the md5 key via that socket.
634 		 * We do not loosen security here:
635 		 * the incoming packet is checked against the md5 hash using the
636 		 * key we find; no RST is generated if the md5 hash doesn't match.
637 		 */
638 		sk1 = __inet_lookup_listener(net,
639 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
640 					     th->source, ip_hdr(skb)->daddr,
641 					     ntohs(th->source), inet_iif(skb));
642 		/* don't send an RST if we can't find a key */
643 		if (!sk1)
644 			return;
645 		rcu_read_lock();
646 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 					&ip_hdr(skb)->saddr, AF_INET);
648 		if (!key)
649 			goto release_sk1;
650 
651 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 			goto release_sk1;
654 	} else {
655 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 					     &ip_hdr(skb)->saddr,
657 					     AF_INET) : NULL;
658 	}
659 
660 	if (key) {
661 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 				   (TCPOPT_NOP << 16) |
663 				   (TCPOPT_MD5SIG << 8) |
664 				   TCPOLEN_MD5SIG);
665 		/* Update length and the length the header thinks exists */
666 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 		rep.th.doff = arg.iov[0].iov_len / 4;
668 
669 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 				     key, ip_hdr(skb)->saddr,
671 				     ip_hdr(skb)->daddr, &rep.th);
672 	}
673 #endif
674 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 				      ip_hdr(skb)->saddr, /* XXX */
676 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost and
680 	 * routing might fail in this case. No choice here: if we force the
681 	 * input interface, we will misroute in case of an asymmetric route.
682 	 */
683 	if (sk)
684 		arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686 	arg.tos = ip_hdr(skb)->tos;
687 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 			      &arg, arg.iov[0].iov_len);
691 
692 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 	if (sk1) {
698 		rcu_read_unlock();
699 		sock_put(sk1);
700 	}
701 #endif
702 }
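
/* Illustrative arithmetic for the non-ACK branch above: an incoming SYN with
 * seq == 1000 and no payload consumes exactly one sequence number, so the RST
 * goes out with ack_seq == 1001 (seq + syn + fin + payload length), i.e. it
 * acknowledges precisely the sequence space the offending segment occupied.
 */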
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709 			    u32 win, u32 tsval, u32 tsecr, int oif,
710 			    struct tcp_md5sig_key *key,
711 			    int reply_flags, u8 tos)
712 {
713 	const struct tcphdr *th = tcp_hdr(skb);
714 	struct {
715 		struct tcphdr th;
716 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720 			];
721 	} rep;
722 	struct ip_reply_arg arg;
723 	struct net *net = dev_net(skb_dst(skb)->dev);
724 
725 	memset(&rep.th, 0, sizeof(struct tcphdr));
726 	memset(&arg, 0, sizeof(arg));
727 
728 	arg.iov[0].iov_base = (unsigned char *)&rep;
729 	arg.iov[0].iov_len  = sizeof(rep.th);
730 	if (tsecr) {
731 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 				   (TCPOPT_TIMESTAMP << 8) |
733 				   TCPOLEN_TIMESTAMP);
734 		rep.opt[1] = htonl(tsval);
735 		rep.opt[2] = htonl(tsecr);
736 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 	}
738 
739 	/* Swap the send and the receive. */
740 	rep.th.dest    = th->source;
741 	rep.th.source  = th->dest;
742 	rep.th.doff    = arg.iov[0].iov_len / 4;
743 	rep.th.seq     = htonl(seq);
744 	rep.th.ack_seq = htonl(ack);
745 	rep.th.ack     = 1;
746 	rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 	if (key) {
750 		int offset = (tsecr) ? 3 : 0;
751 
752 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 					  (TCPOPT_NOP << 16) |
754 					  (TCPOPT_MD5SIG << 8) |
755 					  TCPOLEN_MD5SIG);
756 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 		rep.th.doff = arg.iov[0].iov_len/4;
758 
759 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 				    key, ip_hdr(skb)->saddr,
761 				    ip_hdr(skb)->daddr, &rep.th);
762 	}
763 #endif
764 	arg.flags = reply_flags;
765 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 				      ip_hdr(skb)->saddr, /* XXX */
767 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 	if (oif)
770 		arg.bound_dev_if = oif;
771 	arg.tos = tos;
772 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 			      &arg, arg.iov[0].iov_len);
776 
777 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 	struct inet_timewait_sock *tw = inet_twsk(sk);
783 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787 			tcp_time_stamp + tcptw->tw_ts_offset,
788 			tcptw->tw_ts_recent,
789 			tw->tw_bound_dev_if,
790 			tcp_twsk_md5_key(tcptw),
791 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792 			tw->tw_tos
793 			);
794 
795 	inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
799 				  struct request_sock *req)
800 {
801 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803 	 */
804 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
807 			tcp_time_stamp,
808 			req->ts_recent,
809 			0,
810 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811 					  AF_INET),
812 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 			ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *	Send a SYN-ACK after having received a SYN.
818  *	This still operates on a request_sock only, not on a big
819  *	socket.
820  */
821 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
822 			      struct flowi *fl,
823 			      struct request_sock *req,
824 			      u16 queue_mapping,
825 			      struct tcp_fastopen_cookie *foc)
826 {
827 	const struct inet_request_sock *ireq = inet_rsk(req);
828 	struct flowi4 fl4;
829 	int err = -1;
830 	struct sk_buff *skb;
831 
832 	/* First, grab a route. */
833 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834 		return -1;
835 
836 	skb = tcp_make_synack(sk, dst, req, foc);
837 
838 	if (skb) {
839 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841 		skb_set_queue_mapping(skb, queue_mapping);
842 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
843 					    ireq->ir_rmt_addr,
844 					    ireq->opt);
845 		err = net_xmit_eval(err);
846 	}
847 
848 	return err;
849 }
850 
851 /*
852  *	IPv4 request_sock destructor.
853  */
854 static void tcp_v4_reqsk_destructor(struct request_sock *req)
855 {
856 	kfree(inet_rsk(req)->opt);
857 }
858 
859 
860 #ifdef CONFIG_TCP_MD5SIG
861 /*
862  * RFC2385 MD5 checksumming requires a mapping of
863  * IP address->MD5 Key.
864  * We need to maintain these in the sk structure.
865  */
866 
867 /* Find the Key structure for an address.  */
868 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
869 					 const union tcp_md5_addr *addr,
870 					 int family)
871 {
872 	const struct tcp_sock *tp = tcp_sk(sk);
873 	struct tcp_md5sig_key *key;
874 	unsigned int size = sizeof(struct in_addr);
875 	const struct tcp_md5sig_info *md5sig;
876 
877 	/* caller either holds rcu_read_lock() or socket lock */
878 	md5sig = rcu_dereference_check(tp->md5sig_info,
879 				       sock_owned_by_user(sk) ||
880 				       lockdep_is_held(&sk->sk_lock.slock));
881 	if (!md5sig)
882 		return NULL;
883 #if IS_ENABLED(CONFIG_IPV6)
884 	if (family == AF_INET6)
885 		size = sizeof(struct in6_addr);
886 #endif
887 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
888 		if (key->family != family)
889 			continue;
890 		if (!memcmp(&key->addr, addr, size))
891 			return key;
892 	}
893 	return NULL;
894 }
895 EXPORT_SYMBOL(tcp_md5_do_lookup);
896 
897 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
898 					 const struct sock *addr_sk)
899 {
900 	const union tcp_md5_addr *addr;
901 
902 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
903 	return tcp_md5_do_lookup(sk, addr, AF_INET);
904 }
905 EXPORT_SYMBOL(tcp_v4_md5_lookup);
906 
907 /* This can be called on a newly created socket, from other files */
908 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
909 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
910 {
911 	/* Add Key to the list */
912 	struct tcp_md5sig_key *key;
913 	struct tcp_sock *tp = tcp_sk(sk);
914 	struct tcp_md5sig_info *md5sig;
915 
916 	key = tcp_md5_do_lookup(sk, addr, family);
917 	if (key) {
918 		/* Pre-existing entry - just update that one. */
919 		memcpy(key->key, newkey, newkeylen);
920 		key->keylen = newkeylen;
921 		return 0;
922 	}
923 
924 	md5sig = rcu_dereference_protected(tp->md5sig_info,
925 					   sock_owned_by_user(sk));
926 	if (!md5sig) {
927 		md5sig = kmalloc(sizeof(*md5sig), gfp);
928 		if (!md5sig)
929 			return -ENOMEM;
930 
931 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
932 		INIT_HLIST_HEAD(&md5sig->head);
933 		rcu_assign_pointer(tp->md5sig_info, md5sig);
934 	}
935 
936 	key = sock_kmalloc(sk, sizeof(*key), gfp);
937 	if (!key)
938 		return -ENOMEM;
939 	if (!tcp_alloc_md5sig_pool()) {
940 		sock_kfree_s(sk, key, sizeof(*key));
941 		return -ENOMEM;
942 	}
943 
944 	memcpy(key->key, newkey, newkeylen);
945 	key->keylen = newkeylen;
946 	key->family = family;
947 	memcpy(&key->addr, addr,
948 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
949 				      sizeof(struct in_addr));
950 	hlist_add_head_rcu(&key->node, &md5sig->head);
951 	return 0;
952 }
953 EXPORT_SYMBOL(tcp_md5_do_add);
954 
955 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
956 {
957 	struct tcp_md5sig_key *key;
958 
959 	key = tcp_md5_do_lookup(sk, addr, family);
960 	if (!key)
961 		return -ENOENT;
962 	hlist_del_rcu(&key->node);
963 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
964 	kfree_rcu(key, rcu);
965 	return 0;
966 }
967 EXPORT_SYMBOL(tcp_md5_do_del);
968 
969 static void tcp_clear_md5_list(struct sock *sk)
970 {
971 	struct tcp_sock *tp = tcp_sk(sk);
972 	struct tcp_md5sig_key *key;
973 	struct hlist_node *n;
974 	struct tcp_md5sig_info *md5sig;
975 
976 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
977 
978 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
979 		hlist_del_rcu(&key->node);
980 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
981 		kfree_rcu(key, rcu);
982 	}
983 }
984 
985 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
986 				 int optlen)
987 {
988 	struct tcp_md5sig cmd;
989 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
990 
991 	if (optlen < sizeof(cmd))
992 		return -EINVAL;
993 
994 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
995 		return -EFAULT;
996 
997 	if (sin->sin_family != AF_INET)
998 		return -EINVAL;
999 
1000 	if (!cmd.tcpm_keylen)
1001 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1002 				      AF_INET);
1003 
1004 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1005 		return -EINVAL;
1006 
1007 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1008 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1009 			      GFP_KERNEL);
1010 }
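
/* A hedged userspace sketch of the setsockopt() this handler serves; the
 * helper name demo_set_md5_key() is illustrative, the field names follow
 * struct tcp_md5sig from <linux/tcp.h>, header choices may need adjusting per
 * libc, and error handling is elided:
 *
 *	#include <linux/tcp.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int demo_set_md5_key(int fd, const struct sockaddr_in *peer,
 *				    const void *key, int keylen)
 *	{
 *		struct tcp_md5sig md5;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *		md5.tcpm_keylen = keylen;
 *		memcpy(md5.tcpm_key, key, keylen);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */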
1011 
1012 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1013 					__be32 daddr, __be32 saddr, int nbytes)
1014 {
1015 	struct tcp4_pseudohdr *bp;
1016 	struct scatterlist sg;
1017 
1018 	bp = &hp->md5_blk.ip4;
1019 
1020 	/*
1021 	 * 1. the TCP pseudo-header (in the order: source IP address,
1022 	 * destination IP address, zero-padded protocol number, and
1023 	 * segment length)
1024 	 */
1025 	bp->saddr = saddr;
1026 	bp->daddr = daddr;
1027 	bp->pad = 0;
1028 	bp->protocol = IPPROTO_TCP;
1029 	bp->len = cpu_to_be16(nbytes);
1030 
1031 	sg_init_one(&sg, bp, sizeof(*bp));
1032 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1033 }
1034 
1035 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1036 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1037 {
1038 	struct tcp_md5sig_pool *hp;
1039 	struct hash_desc *desc;
1040 
1041 	hp = tcp_get_md5sig_pool();
1042 	if (!hp)
1043 		goto clear_hash_noput;
1044 	desc = &hp->md5_desc;
1045 
1046 	if (crypto_hash_init(desc))
1047 		goto clear_hash;
1048 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1049 		goto clear_hash;
1050 	if (tcp_md5_hash_header(hp, th))
1051 		goto clear_hash;
1052 	if (tcp_md5_hash_key(hp, key))
1053 		goto clear_hash;
1054 	if (crypto_hash_final(desc, md5_hash))
1055 		goto clear_hash;
1056 
1057 	tcp_put_md5sig_pool();
1058 	return 0;
1059 
1060 clear_hash:
1061 	tcp_put_md5sig_pool();
1062 clear_hash_noput:
1063 	memset(md5_hash, 0, 16);
1064 	return 1;
1065 }
1066 
1067 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1068 			const struct sock *sk,
1069 			const struct sk_buff *skb)
1070 {
1071 	struct tcp_md5sig_pool *hp;
1072 	struct hash_desc *desc;
1073 	const struct tcphdr *th = tcp_hdr(skb);
1074 	__be32 saddr, daddr;
1075 
1076 	if (sk) { /* valid for established/request sockets */
1077 		saddr = sk->sk_rcv_saddr;
1078 		daddr = sk->sk_daddr;
1079 	} else {
1080 		const struct iphdr *iph = ip_hdr(skb);
1081 		saddr = iph->saddr;
1082 		daddr = iph->daddr;
1083 	}
1084 
1085 	hp = tcp_get_md5sig_pool();
1086 	if (!hp)
1087 		goto clear_hash_noput;
1088 	desc = &hp->md5_desc;
1089 
1090 	if (crypto_hash_init(desc))
1091 		goto clear_hash;
1092 
1093 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1094 		goto clear_hash;
1095 	if (tcp_md5_hash_header(hp, th))
1096 		goto clear_hash;
1097 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1098 		goto clear_hash;
1099 	if (tcp_md5_hash_key(hp, key))
1100 		goto clear_hash;
1101 	if (crypto_hash_final(desc, md5_hash))
1102 		goto clear_hash;
1103 
1104 	tcp_put_md5sig_pool();
1105 	return 0;
1106 
1107 clear_hash:
1108 	tcp_put_md5sig_pool();
1109 clear_hash_noput:
1110 	memset(md5_hash, 0, 16);
1111 	return 1;
1112 }
1113 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1114 
1115 /* Called with rcu_read_lock() */
1116 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1117 				    const struct sk_buff *skb)
1118 {
1119 	/*
1120 	 * This gets called for each TCP segment that arrives
1121 	 * so we want to be efficient.
1122 	 * We have 3 drop cases:
1123 	 * o No MD5 hash and one expected.
1124 	 * o MD5 hash and we're not expecting one.
1125 	 * o MD5 hash and it's wrong.
1126 	 */
1127 	const __u8 *hash_location = NULL;
1128 	struct tcp_md5sig_key *hash_expected;
1129 	const struct iphdr *iph = ip_hdr(skb);
1130 	const struct tcphdr *th = tcp_hdr(skb);
1131 	int genhash;
1132 	unsigned char newhash[16];
1133 
1134 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1135 					  AF_INET);
1136 	hash_location = tcp_parse_md5sig_option(th);
1137 
1138 	/* We've parsed the options - do we have a hash? */
1139 	if (!hash_expected && !hash_location)
1140 		return false;
1141 
1142 	if (hash_expected && !hash_location) {
1143 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1144 		return true;
1145 	}
1146 
1147 	if (!hash_expected && hash_location) {
1148 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1149 		return true;
1150 	}
1151 
1152 	/* Okay, so this is hash_expected and hash_location -
1153 	 * so we need to calculate the checksum.
1154 	 */
1155 	genhash = tcp_v4_md5_hash_skb(newhash,
1156 				      hash_expected,
1157 				      NULL, skb);
1158 
1159 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1160 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1161 				     &iph->saddr, ntohs(th->source),
1162 				     &iph->daddr, ntohs(th->dest),
1163 				     genhash ? " tcp_v4_calc_md5_hash failed"
1164 				     : "");
1165 		return true;
1166 	}
1167 	return false;
1168 }
1169 #endif
1170 
1171 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1172 			    struct sk_buff *skb)
1173 {
1174 	struct inet_request_sock *ireq = inet_rsk(req);
1175 
1176 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1177 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1178 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1179 	ireq->opt = tcp_v4_save_options(skb);
1180 }
1181 
1182 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1183 					  const struct request_sock *req,
1184 					  bool *strict)
1185 {
1186 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1187 
1188 	if (strict) {
1189 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1190 			*strict = true;
1191 		else
1192 			*strict = false;
1193 	}
1194 
1195 	return dst;
1196 }
1197 
1198 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1199 	.family		=	PF_INET,
1200 	.obj_size	=	sizeof(struct tcp_request_sock),
1201 	.rtx_syn_ack	=	tcp_rtx_synack,
1202 	.send_ack	=	tcp_v4_reqsk_send_ack,
1203 	.destructor	=	tcp_v4_reqsk_destructor,
1204 	.send_reset	=	tcp_v4_send_reset,
1205 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1206 };
1207 
1208 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1209 	.mss_clamp	=	TCP_MSS_DEFAULT,
1210 #ifdef CONFIG_TCP_MD5SIG
1211 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1212 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1213 #endif
1214 	.init_req	=	tcp_v4_init_req,
1215 #ifdef CONFIG_SYN_COOKIES
1216 	.cookie_init_seq =	cookie_v4_init_sequence,
1217 #endif
1218 	.route_req	=	tcp_v4_route_req,
1219 	.init_seq	=	tcp_v4_init_sequence,
1220 	.send_synack	=	tcp_v4_send_synack,
1221 	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
1222 };
1223 
1224 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1225 {
1226 	/* Never answer SYNs sent to broadcast or multicast */
1227 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1228 		goto drop;
1229 
1230 	return tcp_conn_request(&tcp_request_sock_ops,
1231 				&tcp_request_sock_ipv4_ops, sk, skb);
1232 
1233 drop:
1234 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1235 	return 0;
1236 }
1237 EXPORT_SYMBOL(tcp_v4_conn_request);
1238 
1239 
1240 /*
1241  * The three way handshake has completed - we got a valid ACK -
1242  * now create the new socket.
1243  */
1244 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1245 				  struct request_sock *req,
1246 				  struct dst_entry *dst)
1247 {
1248 	struct inet_request_sock *ireq;
1249 	struct inet_sock *newinet;
1250 	struct tcp_sock *newtp;
1251 	struct sock *newsk;
1252 #ifdef CONFIG_TCP_MD5SIG
1253 	struct tcp_md5sig_key *key;
1254 #endif
1255 	struct ip_options_rcu *inet_opt;
1256 
1257 	if (sk_acceptq_is_full(sk))
1258 		goto exit_overflow;
1259 
1260 	newsk = tcp_create_openreq_child(sk, req, skb);
1261 	if (!newsk)
1262 		goto exit_nonewsk;
1263 
1264 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1265 	inet_sk_rx_dst_set(newsk, skb);
1266 
1267 	newtp		      = tcp_sk(newsk);
1268 	newinet		      = inet_sk(newsk);
1269 	ireq		      = inet_rsk(req);
1270 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1271 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1272 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1273 	inet_opt	      = ireq->opt;
1274 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1275 	ireq->opt	      = NULL;
1276 	newinet->mc_index     = inet_iif(skb);
1277 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1278 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1279 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1280 	inet_set_txhash(newsk);
1281 	if (inet_opt)
1282 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1283 	newinet->inet_id = newtp->write_seq ^ jiffies;
1284 
1285 	if (!dst) {
1286 		dst = inet_csk_route_child_sock(sk, newsk, req);
1287 		if (!dst)
1288 			goto put_and_exit;
1289 	} else {
1290 		/* syncookie case : see end of cookie_v4_check() */
1291 	}
1292 	sk_setup_caps(newsk, dst);
1293 
1294 	tcp_ca_openreq_child(newsk, dst);
1295 
1296 	tcp_sync_mss(newsk, dst_mtu(dst));
1297 	newtp->advmss = dst_metric_advmss(dst);
1298 	if (tcp_sk(sk)->rx_opt.user_mss &&
1299 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1300 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1301 
1302 	tcp_initialize_rcv_mss(newsk);
1303 
1304 #ifdef CONFIG_TCP_MD5SIG
1305 	/* Copy over the MD5 key from the original socket */
1306 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1307 				AF_INET);
1308 	if (key) {
1309 		/*
1310 		 * We're using one, so create a matching key
1311 		 * on the newsk structure. If we fail to get
1312 		 * memory, then we end up not copying the key
1313 		 * across. Shucks.
1314 		 */
1315 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1316 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1317 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1318 	}
1319 #endif
1320 
1321 	if (__inet_inherit_port(sk, newsk) < 0)
1322 		goto put_and_exit;
1323 	__inet_hash_nolisten(newsk, NULL);
1324 
1325 	return newsk;
1326 
1327 exit_overflow:
1328 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1329 exit_nonewsk:
1330 	dst_release(dst);
1331 exit:
1332 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1333 	return NULL;
1334 put_and_exit:
1335 	inet_csk_prepare_forced_close(newsk);
1336 	tcp_done(newsk);
1337 	goto exit;
1338 }
1339 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1340 
1341 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1342 {
1343 	const struct tcphdr *th = tcp_hdr(skb);
1344 	const struct iphdr *iph = ip_hdr(skb);
1345 	struct request_sock *req;
1346 	struct sock *nsk;
1347 
1348 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1349 	if (req) {
1350 		nsk = tcp_check_req(sk, skb, req, false);
1351 		if (!nsk)
1352 			reqsk_put(req);
1353 		return nsk;
1354 	}
1355 
1356 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1357 			th->source, iph->daddr, th->dest, inet_iif(skb));
1358 
1359 	if (nsk) {
1360 		if (nsk->sk_state != TCP_TIME_WAIT) {
1361 			bh_lock_sock(nsk);
1362 			return nsk;
1363 		}
1364 		inet_twsk_put(inet_twsk(nsk));
1365 		return NULL;
1366 	}
1367 
1368 #ifdef CONFIG_SYN_COOKIES
1369 	if (!th->syn)
1370 		sk = cookie_v4_check(sk, skb);
1371 #endif
1372 	return sk;
1373 }
1374 
1375 /* The socket must have its spinlock held when we get
1376  * here.
1377  *
1378  * We have a potential double-lock case here, so even when
1379  * doing backlog processing we use the BH locking scheme.
1380  * This is because we cannot sleep with the original spinlock
1381  * held.
1382  */
1383 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1384 {
1385 	struct sock *rsk;
1386 
1387 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1388 		struct dst_entry *dst = sk->sk_rx_dst;
1389 
1390 		sock_rps_save_rxhash(sk, skb);
1391 		sk_mark_napi_id(sk, skb);
1392 		if (dst) {
1393 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1394 			    !dst->ops->check(dst, 0)) {
1395 				dst_release(dst);
1396 				sk->sk_rx_dst = NULL;
1397 			}
1398 		}
1399 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1400 		return 0;
1401 	}
1402 
1403 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1404 		goto csum_err;
1405 
1406 	if (sk->sk_state == TCP_LISTEN) {
1407 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1408 		if (!nsk)
1409 			goto discard;
1410 
1411 		if (nsk != sk) {
1412 			sock_rps_save_rxhash(nsk, skb);
1413 			sk_mark_napi_id(sk, skb);
1414 			if (tcp_child_process(sk, nsk, skb)) {
1415 				rsk = nsk;
1416 				goto reset;
1417 			}
1418 			return 0;
1419 		}
1420 	} else
1421 		sock_rps_save_rxhash(sk, skb);
1422 
1423 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1424 		rsk = sk;
1425 		goto reset;
1426 	}
1427 	return 0;
1428 
1429 reset:
1430 	tcp_v4_send_reset(rsk, skb);
1431 discard:
1432 	kfree_skb(skb);
1433 	/* Be careful here. If this function gets more complicated and
1434 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1435 	 * might be destroyed here. This current version compiles correctly,
1436 	 * but you have been warned.
1437 	 */
1438 	return 0;
1439 
1440 csum_err:
1441 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1442 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1443 	goto discard;
1444 }
1445 EXPORT_SYMBOL(tcp_v4_do_rcv);
1446 
1447 void tcp_v4_early_demux(struct sk_buff *skb)
1448 {
1449 	const struct iphdr *iph;
1450 	const struct tcphdr *th;
1451 	struct sock *sk;
1452 
1453 	if (skb->pkt_type != PACKET_HOST)
1454 		return;
1455 
1456 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1457 		return;
1458 
1459 	iph = ip_hdr(skb);
1460 	th = tcp_hdr(skb);
1461 
1462 	if (th->doff < sizeof(struct tcphdr) / 4)
1463 		return;
1464 
1465 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1466 				       iph->saddr, th->source,
1467 				       iph->daddr, ntohs(th->dest),
1468 				       skb->skb_iif);
1469 	if (sk) {
1470 		skb->sk = sk;
1471 		skb->destructor = sock_edemux;
1472 		if (sk_fullsock(sk)) {
1473 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1474 
1475 			if (dst)
1476 				dst = dst_check(dst, 0);
1477 			if (dst &&
1478 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1479 				skb_dst_set_noref(skb, dst);
1480 		}
1481 	}
1482 }
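
/* Whether this early demux runs at all is decided on the IP receive path,
 * not here; the control is assumed to be the usual net.ipv4.ip_early_demux
 * sysctl (/proc/sys/net/ipv4/ip_early_demux), consulted before the
 * protocol's ->early_demux handler is invoked.
 */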
1483 
1484 /* Packet is added to VJ-style prequeue for processing in process
1485  * context, if a reader task is waiting. Apparently, this exciting
1486  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1487  * failed somewhere. Latency? Burstiness? Well, at least now we will
1488  * see why it failed. 8)8)				  --ANK
1489  *
1490  */
1491 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1492 {
1493 	struct tcp_sock *tp = tcp_sk(sk);
1494 
1495 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1496 		return false;
1497 
1498 	if (skb->len <= tcp_hdrlen(skb) &&
1499 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1500 		return false;
1501 
1502 	/* Before escaping the RCU protected region, we need to take care of the
1503 	 * skb dst. Prequeue is only enabled for established sockets.
1504 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1505 	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1506 	 * an optimistic check.
1507 	 */
1508 	if (likely(sk->sk_rx_dst))
1509 		skb_dst_drop(skb);
1510 	else
1511 		skb_dst_force(skb);
1512 
1513 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1514 	tp->ucopy.memory += skb->truesize;
1515 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1516 		struct sk_buff *skb1;
1517 
1518 		BUG_ON(sock_owned_by_user(sk));
1519 
1520 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1521 			sk_backlog_rcv(sk, skb1);
1522 			NET_INC_STATS_BH(sock_net(sk),
1523 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1524 		}
1525 
1526 		tp->ucopy.memory = 0;
1527 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1528 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1529 					   POLLIN | POLLRDNORM | POLLRDBAND);
1530 		if (!inet_csk_ack_scheduled(sk))
1531 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1532 						  (3 * tcp_rto_min(sk)) / 4,
1533 						  TCP_RTO_MAX);
1534 	}
1535 	return true;
1536 }
1537 EXPORT_SYMBOL(tcp_prequeue);
1538 
1539 /*
1540  *	From tcp_input.c
1541  */
1542 
1543 int tcp_v4_rcv(struct sk_buff *skb)
1544 {
1545 	const struct iphdr *iph;
1546 	const struct tcphdr *th;
1547 	struct sock *sk;
1548 	int ret;
1549 	struct net *net = dev_net(skb->dev);
1550 
1551 	if (skb->pkt_type != PACKET_HOST)
1552 		goto discard_it;
1553 
1554 	/* Count it even if it's bad */
1555 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1556 
1557 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1558 		goto discard_it;
1559 
1560 	th = tcp_hdr(skb);
1561 
1562 	if (th->doff < sizeof(struct tcphdr) / 4)
1563 		goto bad_packet;
1564 	if (!pskb_may_pull(skb, th->doff * 4))
1565 		goto discard_it;
1566 
1567 	/* An explanation is required here, I think.
1568 	 * Packet length and doff are validated by header prediction,
1569 	 * provided the case of th->doff == 0 is eliminated.
1570 	 * So, we defer the checks. */
1571 
1572 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1573 		goto csum_error;
1574 
1575 	th = tcp_hdr(skb);
1576 	iph = ip_hdr(skb);
1577 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1578 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1579 	 */
1580 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1581 		sizeof(struct inet_skb_parm));
1582 	barrier();
1583 
1584 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1585 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1586 				    skb->len - th->doff * 4);
1587 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1588 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1589 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1590 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1591 	TCP_SKB_CB(skb)->sacked	 = 0;
1592 
1593 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1594 	if (!sk)
1595 		goto no_tcp_socket;
1596 
1597 process:
1598 	if (sk->sk_state == TCP_TIME_WAIT)
1599 		goto do_time_wait;
1600 
1601 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1602 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1603 		goto discard_and_relse;
1604 	}
1605 
1606 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1607 		goto discard_and_relse;
1608 
1609 #ifdef CONFIG_TCP_MD5SIG
1610 	/*
1611 	 * We really want to reject the packet as early as possible
1612 	 * if:
1613 	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1614 	 *  o There is an MD5 option and we're not expecting one
1615 	 */
1616 	if (tcp_v4_inbound_md5_hash(sk, skb))
1617 		goto discard_and_relse;
1618 #endif
1619 
1620 	nf_reset(skb);
1621 
1622 	if (sk_filter(sk, skb))
1623 		goto discard_and_relse;
1624 
1625 	sk_incoming_cpu_update(sk);
1626 	skb->dev = NULL;
1627 
1628 	bh_lock_sock_nested(sk);
1629 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1630 	ret = 0;
1631 	if (!sock_owned_by_user(sk)) {
1632 		if (!tcp_prequeue(sk, skb))
1633 			ret = tcp_v4_do_rcv(sk, skb);
1634 	} else if (unlikely(sk_add_backlog(sk, skb,
1635 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1636 		bh_unlock_sock(sk);
1637 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1638 		goto discard_and_relse;
1639 	}
1640 	bh_unlock_sock(sk);
1641 
1642 	sock_put(sk);
1643 
1644 	return ret;
1645 
1646 no_tcp_socket:
1647 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1648 		goto discard_it;
1649 
1650 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1651 csum_error:
1652 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1653 bad_packet:
1654 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1655 	} else {
1656 		tcp_v4_send_reset(NULL, skb);
1657 	}
1658 
1659 discard_it:
1660 	/* Discard frame. */
1661 	kfree_skb(skb);
1662 	return 0;
1663 
1664 discard_and_relse:
1665 	sock_put(sk);
1666 	goto discard_it;
1667 
1668 do_time_wait:
1669 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1670 		inet_twsk_put(inet_twsk(sk));
1671 		goto discard_it;
1672 	}
1673 
1674 	if (skb->len < (th->doff << 2)) {
1675 		inet_twsk_put(inet_twsk(sk));
1676 		goto bad_packet;
1677 	}
1678 	if (tcp_checksum_complete(skb)) {
1679 		inet_twsk_put(inet_twsk(sk));
1680 		goto csum_error;
1681 	}
1682 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1683 	case TCP_TW_SYN: {
1684 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1685 							&tcp_hashinfo,
1686 							iph->saddr, th->source,
1687 							iph->daddr, th->dest,
1688 							inet_iif(skb));
1689 		if (sk2) {
1690 			inet_twsk_deschedule(inet_twsk(sk));
1691 			inet_twsk_put(inet_twsk(sk));
1692 			sk = sk2;
1693 			goto process;
1694 		}
1695 		/* Fall through to ACK */
1696 	}
1697 	case TCP_TW_ACK:
1698 		tcp_v4_timewait_ack(sk, skb);
1699 		break;
1700 	case TCP_TW_RST:
1701 		goto no_tcp_socket;
1702 	case TCP_TW_SUCCESS:;
1703 	}
1704 	goto discard_it;
1705 }
1706 
1707 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1708 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1709 	.twsk_unique	= tcp_twsk_unique,
1710 	.twsk_destructor= tcp_twsk_destructor,
1711 };
1712 
1713 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1714 {
1715 	struct dst_entry *dst = skb_dst(skb);
1716 
1717 	if (dst) {
1718 		dst_hold(dst);
1719 		sk->sk_rx_dst = dst;
1720 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1721 	}
1722 }
1723 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1724 
1725 const struct inet_connection_sock_af_ops ipv4_specific = {
1726 	.queue_xmit	   = ip_queue_xmit,
1727 	.send_check	   = tcp_v4_send_check,
1728 	.rebuild_header	   = inet_sk_rebuild_header,
1729 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1730 	.conn_request	   = tcp_v4_conn_request,
1731 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1732 	.net_header_len	   = sizeof(struct iphdr),
1733 	.setsockopt	   = ip_setsockopt,
1734 	.getsockopt	   = ip_getsockopt,
1735 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1736 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1737 	.bind_conflict	   = inet_csk_bind_conflict,
1738 #ifdef CONFIG_COMPAT
1739 	.compat_setsockopt = compat_ip_setsockopt,
1740 	.compat_getsockopt = compat_ip_getsockopt,
1741 #endif
1742 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1743 };
1744 EXPORT_SYMBOL(ipv4_specific);
1745 
1746 #ifdef CONFIG_TCP_MD5SIG
1747 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1748 	.md5_lookup		= tcp_v4_md5_lookup,
1749 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1750 	.md5_parse		= tcp_v4_parse_md5_keys,
1751 };
1752 #endif
1753 
1754 /* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
1756  */
1757 static int tcp_v4_init_sock(struct sock *sk)
1758 {
1759 	struct inet_connection_sock *icsk = inet_csk(sk);
1760 
1761 	tcp_init_sock(sk);
1762 
1763 	icsk->icsk_af_ops = &ipv4_specific;
1764 
1765 #ifdef CONFIG_TCP_MD5SIG
1766 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1767 #endif
1768 
1769 	return 0;
1770 }
1771 
1772 void tcp_v4_destroy_sock(struct sock *sk)
1773 {
1774 	struct tcp_sock *tp = tcp_sk(sk);
1775 
1776 	tcp_clear_xmit_timers(sk);
1777 
1778 	tcp_cleanup_congestion_control(sk);
1779 
1780 	/* Clean up the write buffer. */
1781 	tcp_write_queue_purge(sk);
1782 
1783 	/* Cleans up our, hopefully empty, out_of_order_queue. */
1784 	__skb_queue_purge(&tp->out_of_order_queue);
1785 
1786 #ifdef CONFIG_TCP_MD5SIG
1787 	/* Clean up the MD5 key list, if any */
1788 	if (tp->md5sig_info) {
1789 		tcp_clear_md5_list(sk);
1790 		kfree_rcu(tp->md5sig_info, rcu);
1791 		tp->md5sig_info = NULL;
1792 	}
1793 #endif
1794 
1795 	/* Clean the prequeue; it really must be empty */
1796 	__skb_queue_purge(&tp->ucopy.prequeue);
1797 
1798 	/* Clean up a referenced TCP bind bucket. */
1799 	if (inet_csk(sk)->icsk_bind_hash)
1800 		inet_put_port(sk);
1801 
1802 	BUG_ON(tp->fastopen_rsk);
1803 
1804 	/* If socket is aborted during connect operation */
1805 	tcp_free_fastopen_req(tp);
1806 	tcp_saved_syn_free(tp);
1807 
1808 	sk_sockets_allocated_dec(sk);
1809 	sock_release_memcg(sk);
1810 }
1811 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1812 
1813 #ifdef CONFIG_PROC_FS
1814 /* Proc filesystem TCP sock list dumping. */
1815 
1816 /*
1817  * Get the next listener socket following cur.  If cur is NULL, get the first
1818  * socket starting from the bucket given in st->bucket; when st->bucket is zero
1819  * the very first socket in the hash table is returned.
1820  */
1821 static void *listening_get_next(struct seq_file *seq, void *cur)
1822 {
1823 	struct inet_connection_sock *icsk;
1824 	struct hlist_nulls_node *node;
1825 	struct sock *sk = cur;
1826 	struct inet_listen_hashbucket *ilb;
1827 	struct tcp_iter_state *st = seq->private;
1828 	struct net *net = seq_file_net(seq);
1829 
1830 	if (!sk) {
1831 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1832 		spin_lock_bh(&ilb->lock);
1833 		sk = sk_nulls_head(&ilb->head);
1834 		st->offset = 0;
1835 		goto get_sk;
1836 	}
1837 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1838 	++st->num;
1839 	++st->offset;
1840 
1841 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
1842 		struct request_sock *req = cur;
1843 
1844 		icsk = inet_csk(st->syn_wait_sk);
1845 		req = req->dl_next;
1846 		while (1) {
1847 			while (req) {
1848 				if (req->rsk_ops->family == st->family) {
1849 					cur = req;
1850 					goto out;
1851 				}
1852 				req = req->dl_next;
1853 			}
1854 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1855 				break;
1856 get_req:
1857 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1858 		}
1859 		sk	  = sk_nulls_next(st->syn_wait_sk);
1860 		st->state = TCP_SEQ_STATE_LISTENING;
1861 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1862 	} else {
1863 		icsk = inet_csk(sk);
1864 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1865 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
1866 			goto start_req;
1867 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1868 		sk = sk_nulls_next(sk);
1869 	}
1870 get_sk:
1871 	sk_nulls_for_each_from(sk, node) {
1872 		if (!net_eq(sock_net(sk), net))
1873 			continue;
1874 		if (sk->sk_family == st->family) {
1875 			cur = sk;
1876 			goto out;
1877 		}
1878 		icsk = inet_csk(sk);
1879 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1880 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1881 start_req:
1882 			st->uid		= sock_i_uid(sk);
1883 			st->syn_wait_sk = sk;
1884 			st->state	= TCP_SEQ_STATE_OPENREQ;
1885 			st->sbucket	= 0;
1886 			goto get_req;
1887 		}
1888 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1889 	}
1890 	spin_unlock_bh(&ilb->lock);
1891 	st->offset = 0;
1892 	if (++st->bucket < INET_LHTABLE_SIZE) {
1893 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1894 		spin_lock_bh(&ilb->lock);
1895 		sk = sk_nulls_head(&ilb->head);
1896 		goto get_sk;
1897 	}
1898 	cur = NULL;
1899 out:
1900 	return cur;
1901 }
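
/*
 * Iteration summary: each listening hash bucket is walked under ilb->lock;
 * a listening socket may be followed by the entries of its SYN table
 * (TCP_SEQ_STATE_OPENREQ), walked under syn_wait_lock, before the iterator
 * advances to the next socket or bucket.
 */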
1902 
1903 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1904 {
1905 	struct tcp_iter_state *st = seq->private;
1906 	void *rc;
1907 
1908 	st->bucket = 0;
1909 	st->offset = 0;
1910 	rc = listening_get_next(seq, NULL);
1911 
1912 	while (rc && *pos) {
1913 		rc = listening_get_next(seq, rc);
1914 		--*pos;
1915 	}
1916 	return rc;
1917 }
1918 
1919 static inline bool empty_bucket(const struct tcp_iter_state *st)
1920 {
1921 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1922 }
1923 
1924 /*
1925  * Get the first established socket starting from the bucket given in st->bucket.
1926  * If st->bucket is zero, the very first socket in the hash is returned.
1927  */
1928 static void *established_get_first(struct seq_file *seq)
1929 {
1930 	struct tcp_iter_state *st = seq->private;
1931 	struct net *net = seq_file_net(seq);
1932 	void *rc = NULL;
1933 
1934 	st->offset = 0;
1935 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1936 		struct sock *sk;
1937 		struct hlist_nulls_node *node;
1938 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1939 
1940 		/* Lockless fast path for the common case of empty buckets */
1941 		if (empty_bucket(st))
1942 			continue;
1943 
1944 		spin_lock_bh(lock);
1945 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1946 			if (sk->sk_family != st->family ||
1947 			    !net_eq(sock_net(sk), net)) {
1948 				continue;
1949 			}
1950 			rc = sk;
1951 			goto out;
1952 		}
1953 		spin_unlock_bh(lock);
1954 	}
1955 out:
1956 	return rc;
1957 }
1958 
1959 static void *established_get_next(struct seq_file *seq, void *cur)
1960 {
1961 	struct sock *sk = cur;
1962 	struct hlist_nulls_node *node;
1963 	struct tcp_iter_state *st = seq->private;
1964 	struct net *net = seq_file_net(seq);
1965 
1966 	++st->num;
1967 	++st->offset;
1968 
1969 	sk = sk_nulls_next(sk);
1970 
1971 	sk_nulls_for_each_from(sk, node) {
1972 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1973 			return sk;
1974 	}
1975 
1976 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1977 	++st->bucket;
1978 	return established_get_first(seq);
1979 }
1980 
1981 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1982 {
1983 	struct tcp_iter_state *st = seq->private;
1984 	void *rc;
1985 
1986 	st->bucket = 0;
1987 	rc = established_get_first(seq);
1988 
1989 	while (rc && pos) {
1990 		rc = established_get_next(seq, rc);
1991 		--pos;
1992 	}
1993 	return rc;
1994 }
1995 
1996 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1997 {
1998 	void *rc;
1999 	struct tcp_iter_state *st = seq->private;
2000 
2001 	st->state = TCP_SEQ_STATE_LISTENING;
2002 	rc	  = listening_get_idx(seq, &pos);
2003 
2004 	if (!rc) {
2005 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2006 		rc	  = established_get_idx(seq, pos);
2007 	}
2008 
2009 	return rc;
2010 }
2011 
2012 static void *tcp_seek_last_pos(struct seq_file *seq)
2013 {
2014 	struct tcp_iter_state *st = seq->private;
2015 	int offset = st->offset;
2016 	int orig_num = st->num;
2017 	void *rc = NULL;
2018 
2019 	switch (st->state) {
2020 	case TCP_SEQ_STATE_OPENREQ:
2021 	case TCP_SEQ_STATE_LISTENING:
2022 		if (st->bucket >= INET_LHTABLE_SIZE)
2023 			break;
2024 		st->state = TCP_SEQ_STATE_LISTENING;
2025 		rc = listening_get_next(seq, NULL);
2026 		while (offset-- && rc)
2027 			rc = listening_get_next(seq, rc);
2028 		if (rc)
2029 			break;
2030 		st->bucket = 0;
2031 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2032 		/* Fallthrough */
2033 	case TCP_SEQ_STATE_ESTABLISHED:
2034 		if (st->bucket > tcp_hashinfo.ehash_mask)
2035 			break;
2036 		rc = established_get_first(seq);
2037 		while (offset-- && rc)
2038 			rc = established_get_next(seq, rc);
2039 	}
2040 
2041 	st->num = orig_num;
2042 
2043 	return rc;
2044 }
2045 
2046 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2047 {
2048 	struct tcp_iter_state *st = seq->private;
2049 	void *rc;
2050 
2051 	if (*pos && *pos == st->last_pos) {
2052 		rc = tcp_seek_last_pos(seq);
2053 		if (rc)
2054 			goto out;
2055 	}
2056 
2057 	st->state = TCP_SEQ_STATE_LISTENING;
2058 	st->num = 0;
2059 	st->bucket = 0;
2060 	st->offset = 0;
2061 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2062 
2063 out:
2064 	st->last_pos = *pos;
2065 	return rc;
2066 }
2067 
2068 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2069 {
2070 	struct tcp_iter_state *st = seq->private;
2071 	void *rc = NULL;
2072 
2073 	if (v == SEQ_START_TOKEN) {
2074 		rc = tcp_get_idx(seq, 0);
2075 		goto out;
2076 	}
2077 
2078 	switch (st->state) {
2079 	case TCP_SEQ_STATE_OPENREQ:
2080 	case TCP_SEQ_STATE_LISTENING:
2081 		rc = listening_get_next(seq, v);
2082 		if (!rc) {
2083 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2084 			st->bucket = 0;
2085 			st->offset = 0;
2086 			rc	  = established_get_first(seq);
2087 		}
2088 		break;
2089 	case TCP_SEQ_STATE_ESTABLISHED:
2090 		rc = established_get_next(seq, v);
2091 		break;
2092 	}
2093 out:
2094 	++*pos;
2095 	st->last_pos = *pos;
2096 	return rc;
2097 }
2098 
2099 static void tcp_seq_stop(struct seq_file *seq, void *v)
2100 {
2101 	struct tcp_iter_state *st = seq->private;
2102 
2103 	switch (st->state) {
2104 	case TCP_SEQ_STATE_OPENREQ:
2105 		if (v) {
2106 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2107 			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2108 		}
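		/* fall through: release the listening bucket lock below */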
2109 	case TCP_SEQ_STATE_LISTENING:
2110 		if (v != SEQ_START_TOKEN)
2111 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2112 		break;
2113 	case TCP_SEQ_STATE_ESTABLISHED:
2114 		if (v)
2115 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2116 		break;
2117 	}
2118 }
2119 
2120 int tcp_seq_open(struct inode *inode, struct file *file)
2121 {
2122 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2123 	struct tcp_iter_state *s;
2124 	int err;
2125 
2126 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2127 			  sizeof(struct tcp_iter_state));
2128 	if (err < 0)
2129 		return err;
2130 
2131 	s = ((struct seq_file *)file->private_data)->private;
2132 	s->family		= afinfo->family;
2133 	s->last_pos		= 0;
2134 	return 0;
2135 }
2136 EXPORT_SYMBOL(tcp_seq_open);
2137 
2138 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2139 {
2140 	int rc = 0;
2141 	struct proc_dir_entry *p;
2142 
2143 	afinfo->seq_ops.start		= tcp_seq_start;
2144 	afinfo->seq_ops.next		= tcp_seq_next;
2145 	afinfo->seq_ops.stop		= tcp_seq_stop;
2146 
2147 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2148 			     afinfo->seq_fops, afinfo);
2149 	if (!p)
2150 		rc = -ENOMEM;
2151 	return rc;
2152 }
2153 EXPORT_SYMBOL(tcp_proc_register);
2154 
2155 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2156 {
2157 	remove_proc_entry(afinfo->name, net->proc_net);
2158 }
2159 EXPORT_SYMBOL(tcp_proc_unregister);
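
/*
 * Usage example: tcp4_proc_init_net() below registers the IPv4 "tcp" entry
 * through this interface; net/ipv6/tcp_ipv6.c registers an analogous
 * tcp_seq_afinfo for the "tcp6" entry in the same way.
 */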
2160 
2161 static void get_openreq4(const struct request_sock *req,
2162 			 struct seq_file *f, int i, kuid_t uid)
2163 {
2164 	const struct inet_request_sock *ireq = inet_rsk(req);
2165 	long delta = req->rsk_timer.expires - jiffies;
2166 
2167 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2168 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2169 		i,
2170 		ireq->ir_loc_addr,
2171 		ireq->ir_num,
2172 		ireq->ir_rmt_addr,
2173 		ntohs(ireq->ir_rmt_port),
2174 		TCP_SYN_RECV,
2175 		0, 0, /* could print option size, but that is af dependent. */
2176 		1,    /* timers active (only the expire timer) */
2177 		jiffies_delta_to_clock_t(delta),
2178 		req->num_timeout,
2179 		from_kuid_munged(seq_user_ns(f), uid),
2180 		0,  /* non-standard timer */
2181 		0, /* open_requests have no inode */
2182 		0,
2183 		req);
2184 }
2185 
2186 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2187 {
2188 	int timer_active;
2189 	unsigned long timer_expires;
2190 	const struct tcp_sock *tp = tcp_sk(sk);
2191 	const struct inet_connection_sock *icsk = inet_csk(sk);
2192 	const struct inet_sock *inet = inet_sk(sk);
2193 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2194 	__be32 dest = inet->inet_daddr;
2195 	__be32 src = inet->inet_rcv_saddr;
2196 	__u16 destp = ntohs(inet->inet_dport);
2197 	__u16 srcp = ntohs(inet->inet_sport);
2198 	int rx_queue;
2199 
2200 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2201 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2202 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2203 		timer_active	= 1;
2204 		timer_expires	= icsk->icsk_timeout;
2205 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2206 		timer_active	= 4;
2207 		timer_expires	= icsk->icsk_timeout;
2208 	} else if (timer_pending(&sk->sk_timer)) {
2209 		timer_active	= 2;
2210 		timer_expires	= sk->sk_timer.expires;
2211 	} else {
2212 		timer_active	= 0;
2213 		timer_expires = jiffies;
2214 	}
2215 
2216 	if (sk->sk_state == TCP_LISTEN)
2217 		rx_queue = sk->sk_ack_backlog;
2218 	else
2219 		/*
2220 		 * Because we don't lock the socket, we might find a transient negative value.
2221 		 */
2222 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2223 
2224 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2225 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2226 		i, src, srcp, dest, destp, sk->sk_state,
2227 		tp->write_seq - tp->snd_una,
2228 		rx_queue,
2229 		timer_active,
2230 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2231 		icsk->icsk_retransmits,
2232 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2233 		icsk->icsk_probes_out,
2234 		sock_i_ino(sk),
2235 		atomic_read(&sk->sk_refcnt), sk,
2236 		jiffies_to_clock_t(icsk->icsk_rto),
2237 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2238 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2239 		tp->snd_cwnd,
2240 		sk->sk_state == TCP_LISTEN ?
2241 		    (fastopenq ? fastopenq->max_qlen : 0) :
2242 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2243 }
2244 
2245 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2246 			       struct seq_file *f, int i)
2247 {
2248 	long delta = tw->tw_timer.expires - jiffies;
2249 	__be32 dest, src;
2250 	__u16 destp, srcp;
2251 
2252 	dest  = tw->tw_daddr;
2253 	src   = tw->tw_rcv_saddr;
2254 	destp = ntohs(tw->tw_dport);
2255 	srcp  = ntohs(tw->tw_sport);
2256 
2257 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2258 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2259 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2260 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2261 		atomic_read(&tw->tw_refcnt), tw);
2262 }
2263 
2264 #define TMPSZ 150
2265 
2266 static int tcp4_seq_show(struct seq_file *seq, void *v)
2267 {
2268 	struct tcp_iter_state *st;
2269 	struct sock *sk = v;
2270 
2271 	seq_setwidth(seq, TMPSZ - 1);
2272 	if (v == SEQ_START_TOKEN) {
2273 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2274 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2275 			   "inode");
2276 		goto out;
2277 	}
2278 	st = seq->private;
2279 
2280 	switch (st->state) {
2281 	case TCP_SEQ_STATE_LISTENING:
2282 	case TCP_SEQ_STATE_ESTABLISHED:
2283 		if (sk->sk_state == TCP_TIME_WAIT)
2284 			get_timewait4_sock(v, seq, st->num);
2285 		else
2286 			get_tcp4_sock(v, seq, st->num);
2287 		break;
2288 	case TCP_SEQ_STATE_OPENREQ:
2289 		get_openreq4(v, seq, st->num, st->uid);
2290 		break;
2291 	}
2292 out:
2293 	seq_pad(seq, '\n');
2294 	return 0;
2295 }
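
/*
 * Example /proc/net/tcp line for a listening socket (values are illustrative
 * only), as produced by get_tcp4_sock() above:
 *
 *   0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 12345 1 ffff880012345678 100 0 0 10 0
 *
 * i.e. slot 0, 127.0.0.1:8080 in TCP_LISTEN (0x0A), empty tx/rx queues, no
 * timer pending, uid 1000, inode 12345, one reference, then the pointer,
 * rto, ato, quick/pingpong, snd_cwnd and ssthresh-or-fastopen fields.
 */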
2296 
2297 static const struct file_operations tcp_afinfo_seq_fops = {
2298 	.owner   = THIS_MODULE,
2299 	.open    = tcp_seq_open,
2300 	.read    = seq_read,
2301 	.llseek  = seq_lseek,
2302 	.release = seq_release_net
2303 };
2304 
2305 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2306 	.name		= "tcp",
2307 	.family		= AF_INET,
2308 	.seq_fops	= &tcp_afinfo_seq_fops,
2309 	.seq_ops	= {
2310 		.show		= tcp4_seq_show,
2311 	},
2312 };
2313 
2314 static int __net_init tcp4_proc_init_net(struct net *net)
2315 {
2316 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2317 }
2318 
2319 static void __net_exit tcp4_proc_exit_net(struct net *net)
2320 {
2321 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2322 }
2323 
2324 static struct pernet_operations tcp4_net_ops = {
2325 	.init = tcp4_proc_init_net,
2326 	.exit = tcp4_proc_exit_net,
2327 };
2328 
2329 int __init tcp4_proc_init(void)
2330 {
2331 	return register_pernet_subsys(&tcp4_net_ops);
2332 }
2333 
2334 void tcp4_proc_exit(void)
2335 {
2336 	unregister_pernet_subsys(&tcp4_net_ops);
2337 }
2338 #endif /* CONFIG_PROC_FS */
2339 
2340 struct proto tcp_prot = {
2341 	.name			= "TCP",
2342 	.owner			= THIS_MODULE,
2343 	.close			= tcp_close,
2344 	.connect		= tcp_v4_connect,
2345 	.disconnect		= tcp_disconnect,
2346 	.accept			= inet_csk_accept,
2347 	.ioctl			= tcp_ioctl,
2348 	.init			= tcp_v4_init_sock,
2349 	.destroy		= tcp_v4_destroy_sock,
2350 	.shutdown		= tcp_shutdown,
2351 	.setsockopt		= tcp_setsockopt,
2352 	.getsockopt		= tcp_getsockopt,
2353 	.recvmsg		= tcp_recvmsg,
2354 	.sendmsg		= tcp_sendmsg,
2355 	.sendpage		= tcp_sendpage,
2356 	.backlog_rcv		= tcp_v4_do_rcv,
2357 	.release_cb		= tcp_release_cb,
2358 	.hash			= inet_hash,
2359 	.unhash			= inet_unhash,
2360 	.get_port		= inet_csk_get_port,
2361 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2362 	.stream_memory_free	= tcp_stream_memory_free,
2363 	.sockets_allocated	= &tcp_sockets_allocated,
2364 	.orphan_count		= &tcp_orphan_count,
2365 	.memory_allocated	= &tcp_memory_allocated,
2366 	.memory_pressure	= &tcp_memory_pressure,
2367 	.sysctl_mem		= sysctl_tcp_mem,
2368 	.sysctl_wmem		= sysctl_tcp_wmem,
2369 	.sysctl_rmem		= sysctl_tcp_rmem,
2370 	.max_header		= MAX_TCP_HEADER,
2371 	.obj_size		= sizeof(struct tcp_sock),
2372 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2373 	.twsk_prot		= &tcp_timewait_sock_ops,
2374 	.rsk_prot		= &tcp_request_sock_ops,
2375 	.h.hashinfo		= &tcp_hashinfo,
2376 	.no_autobind		= true,
2377 #ifdef CONFIG_COMPAT
2378 	.compat_setsockopt	= compat_tcp_setsockopt,
2379 	.compat_getsockopt	= compat_tcp_getsockopt,
2380 #endif
2381 #ifdef CONFIG_MEMCG_KMEM
2382 	.init_cgroup		= tcp_init_cgroup,
2383 	.destroy_cgroup		= tcp_destroy_cgroup,
2384 	.proto_cgroup		= tcp_proto_cgroup,
2385 #endif
2386 };
2387 EXPORT_SYMBOL(tcp_prot);
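
/*
 * tcp_prot is registered from af_inet.c (proto_register() plus the inetsw[]
 * SOCK_STREAM entry), which is how the .init/.destroy hooks above end up
 * attached to every IPv4 TCP socket.
 */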
2388 
2389 static void __net_exit tcp_sk_exit(struct net *net)
2390 {
2391 	int cpu;
2392 
2393 	for_each_possible_cpu(cpu)
2394 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2395 	free_percpu(net->ipv4.tcp_sk);
2396 }
2397 
2398 static int __net_init tcp_sk_init(struct net *net)
2399 {
2400 	int res, cpu;
2401 
2402 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2403 	if (!net->ipv4.tcp_sk)
2404 		return -ENOMEM;
2405 
2406 	for_each_possible_cpu(cpu) {
2407 		struct sock *sk;
2408 
2409 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2410 					   IPPROTO_TCP, net);
2411 		if (res)
2412 			goto fail;
2413 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2414 	}
2415 
2416 	net->ipv4.sysctl_tcp_ecn = 2;
2417 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2418 
2419 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2420 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2421 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2422 
2423 	return 0;
2424 fail:
2425 	tcp_sk_exit(net);
2426 
2427 	return res;
2428 }
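
/*
 * The per-cpu net->ipv4.tcp_sk control sockets created above are used by the
 * RST/ACK reply paths earlier in this file (tcp_v4_send_reset() and
 * tcp_v4_send_ack()) to transmit packets that are not tied to a full socket.
 */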
2429 
2430 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2431 {
2432 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2433 }
2434 
2435 static struct pernet_operations __net_initdata tcp_sk_ops = {
2436        .init	   = tcp_sk_init,
2437        .exit	   = tcp_sk_exit,
2438        .exit_batch = tcp_sk_exit_batch,
2439 };
2440 
2441 void __init tcp_v4_init(void)
2442 {
2443 	inet_hashinfo_init(&tcp_hashinfo);
2444 	if (register_pernet_subsys(&tcp_sk_ops))
2445 		panic("Failed to create the TCP control socket.\n");
2446 }
2447