xref: /linux/net/ipv4/tcp_ipv4.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after a
45  *					year-long coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97 
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100 
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 					  ip_hdr(skb)->saddr,
105 					  tcp_hdr(skb)->dest,
106 					  tcp_hdr(skb)->source);
107 }
108 
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 	struct tcp_sock *tp = tcp_sk(sk);
113 
114 	/* With PAWS, it is safe from the viewpoint
115 	   of data integrity. Even without PAWS it is safe provided sequence
116 	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
117 
118 	   Actually, the idea is close to VJ's, except that the timestamp
119 	   cache is held not per host but per port pair, and the TW bucket
120 	   is used as the state holder.
121 
122 	   If the TW bucket has already been destroyed we fall back to VJ's
123 	   scheme and use the initial timestamp retrieved from the peer table.
124 	 */
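	/* Note: the "+ 65535 + 2" below is presumably chosen so that the new
	 * connection's write_seq starts safely beyond anything the old
	 * TIME-WAIT connection could still have outstanding (one maximal
	 * unscaled window plus a small margin); write_seq == 0 is avoided
	 * because tcp_v4_connect() treats 0 as "generate a fresh ISN".
	 */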
125 	if (tcptw->tw_ts_recent_stamp &&
126 	    (twp == NULL || (sysctl_tcp_tw_reuse &&
127 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 		if (tp->write_seq == 0)
130 			tp->write_seq = 1;
131 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 		sock_hold(sktw);
134 		return 1;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 
141 static int tcp_repair_connect(struct sock *sk)
142 {
143 	tcp_connect_init(sk);
144 	tcp_finish_connect(sk, NULL);
145 
146 	return 0;
147 }
148 
149 /* This will initiate an outgoing connection. */
150 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
151 {
152 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 	struct inet_sock *inet = inet_sk(sk);
154 	struct tcp_sock *tp = tcp_sk(sk);
155 	__be16 orig_sport, orig_dport;
156 	__be32 daddr, nexthop;
157 	struct flowi4 *fl4;
158 	struct rtable *rt;
159 	int err;
160 	struct ip_options_rcu *inet_opt;
161 
162 	if (addr_len < sizeof(struct sockaddr_in))
163 		return -EINVAL;
164 
165 	if (usin->sin_family != AF_INET)
166 		return -EAFNOSUPPORT;
167 
168 	nexthop = daddr = usin->sin_addr.s_addr;
169 	inet_opt = rcu_dereference_protected(inet->inet_opt,
170 					     sock_owned_by_user(sk));
171 	if (inet_opt && inet_opt->opt.srr) {
172 		if (!daddr)
173 			return -EINVAL;
174 		nexthop = inet_opt->opt.faddr;
175 	}
176 
177 	orig_sport = inet->inet_sport;
178 	orig_dport = usin->sin_port;
179 	fl4 = &inet->cork.fl.u.ip4;
180 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
182 			      IPPROTO_TCP,
183 			      orig_sport, orig_dport, sk, true);
184 	if (IS_ERR(rt)) {
185 		err = PTR_ERR(rt);
186 		if (err == -ENETUNREACH)
187 			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
188 		return err;
189 	}
190 
191 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
192 		ip_rt_put(rt);
193 		return -ENETUNREACH;
194 	}
195 
196 	if (!inet_opt || !inet_opt->opt.srr)
197 		daddr = fl4->daddr;
198 
199 	if (!inet->inet_saddr)
200 		inet->inet_saddr = fl4->saddr;
201 	inet->inet_rcv_saddr = inet->inet_saddr;
202 
203 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 		/* Reset inherited state */
205 		tp->rx_opt.ts_recent	   = 0;
206 		tp->rx_opt.ts_recent_stamp = 0;
207 		if (likely(!tp->repair))
208 			tp->write_seq	   = 0;
209 	}
210 
211 	if (tcp_death_row.sysctl_tw_recycle &&
212 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 		tcp_fetch_timewait_stamp(sk, &rt->dst);
214 
215 	inet->inet_dport = usin->sin_port;
216 	inet->inet_daddr = daddr;
217 
218 	inet_csk(sk)->icsk_ext_hdr_len = 0;
219 	if (inet_opt)
220 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
221 
222 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
223 
224 	/* Socket identity is still unknown (sport may be zero).
225 	 * However we set the state to SYN-SENT and, without releasing the
226 	 * socket lock, select a source port, enter ourselves into the hash
227 	 * tables and complete initialization afterwards.
228 	 */
229 	tcp_set_state(sk, TCP_SYN_SENT);
230 	err = inet_hash_connect(&tcp_death_row, sk);
231 	if (err)
232 		goto failure;
233 
234 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
235 			       inet->inet_sport, inet->inet_dport, sk);
236 	if (IS_ERR(rt)) {
237 		err = PTR_ERR(rt);
238 		rt = NULL;
239 		goto failure;
240 	}
241 	/* OK, now commit destination to socket.  */
242 	sk->sk_gso_type = SKB_GSO_TCPV4;
243 	sk_setup_caps(sk, &rt->dst);
244 
245 	if (!tp->write_seq && likely(!tp->repair))
246 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
247 							   inet->inet_daddr,
248 							   inet->inet_sport,
249 							   usin->sin_port);
250 
251 	inet->inet_id = tp->write_seq ^ jiffies;
252 
253 	if (likely(!tp->repair))
254 		err = tcp_connect(sk);
255 	else
256 		err = tcp_repair_connect(sk);
257 
258 	rt = NULL;
259 	if (err)
260 		goto failure;
261 
262 	return 0;
263 
264 failure:
265 	/*
266 	 * This unhashes the socket and releases the local port,
267 	 * if necessary.
268 	 */
269 	tcp_set_state(sk, TCP_CLOSE);
270 	ip_rt_put(rt);
271 	sk->sk_route_caps = 0;
272 	inet->inet_dport = 0;
273 	return err;
274 }
275 EXPORT_SYMBOL(tcp_v4_connect);
276 
277 /*
278  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
279  * It can be called through tcp_release_cb() if socket was owned by user
280  * at the time tcp_v4_err() was called to handle ICMP message.
281  */
282 static void tcp_v4_mtu_reduced(struct sock *sk)
283 {
284 	struct dst_entry *dst;
285 	struct inet_sock *inet = inet_sk(sk);
286 	u32 mtu = tcp_sk(sk)->mtu_info;
287 
288 	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
289 	 * sent out by Linux are always < 576 bytes so they should go through
290 	 * unfragmented).
291 	 */
292 	if (sk->sk_state == TCP_LISTEN)
293 		return;
294 
295 	dst = inet_csk_update_pmtu(sk, mtu);
296 	if (!dst)
297 		return;
298 
299 	/* Something is about to go wrong... Remember the soft error
300 	 * in case this connection is not able to recover.
301 	 */
302 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
303 		sk->sk_err_soft = EMSGSIZE;
304 
305 	mtu = dst_mtu(dst);
306 
307 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
308 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
309 		tcp_sync_mss(sk, mtu);
310 
311 		/* Resend the TCP packet because it's
312 		 * clear that the old packet has been
313 		 * dropped. This is the new "fast" path mtu
314 		 * discovery.
315 		 */
316 		tcp_simple_retransmit(sk);
317 	} /* else let the usual retransmit timer handle it */
318 }
319 
320 static void do_redirect(struct sk_buff *skb, struct sock *sk)
321 {
322 	struct dst_entry *dst = __sk_dst_check(sk, 0);
323 
324 	if (dst)
325 		dst->ops->redirect(dst, sk, skb);
326 }
327 
328 /*
329  * This routine is called by the ICMP module when it gets some
330  * sort of error condition.  If err < 0 then the socket should
331  * be closed and the error returned to the user.  If err > 0
332  * it's just the icmp type << 8 | icmp code.  After adjustment
333  *  the header points to the first 8 bytes of the tcp header.  We need
334  * to find the appropriate port.
335  *
336  * The locking strategy used here is very "optimistic". When
337  * someone else accesses the socket the ICMP is just dropped
338  * and for some paths there is no check at all.
339  * A more general error queue to queue errors for later handling
340  * is probably better.
341  *
342  */
343 
344 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
345 {
346 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
347 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
348 	struct inet_connection_sock *icsk;
349 	struct tcp_sock *tp;
350 	struct inet_sock *inet;
351 	const int type = icmp_hdr(icmp_skb)->type;
352 	const int code = icmp_hdr(icmp_skb)->code;
353 	struct sock *sk;
354 	struct sk_buff *skb;
355 	__u32 seq;
356 	__u32 remaining;
357 	int err;
358 	struct net *net = dev_net(icmp_skb->dev);
359 
360 	if (icmp_skb->len < (iph->ihl << 2) + 8) {
361 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
362 		return;
363 	}
364 
365 	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
366 			iph->saddr, th->source, inet_iif(icmp_skb));
367 	if (!sk) {
368 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
369 		return;
370 	}
371 	if (sk->sk_state == TCP_TIME_WAIT) {
372 		inet_twsk_put(inet_twsk(sk));
373 		return;
374 	}
375 
376 	bh_lock_sock(sk);
377 	/* If too many ICMPs get dropped on busy
378 	 * servers this needs to be solved differently.
379 	 * We do take care of the PMTU discovery (RFC1191) special case:
380 	 * we can receive locally generated ICMP messages while the socket is held.
381 	 */
382 	if (sock_owned_by_user(sk) &&
383 	    type != ICMP_DEST_UNREACH &&
384 	    code != ICMP_FRAG_NEEDED)
385 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
386 
387 	if (sk->sk_state == TCP_CLOSE)
388 		goto out;
389 
390 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
391 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
392 		goto out;
393 	}
394 
395 	icsk = inet_csk(sk);
396 	tp = tcp_sk(sk);
397 	seq = ntohl(th->seq);
398 	if (sk->sk_state != TCP_LISTEN &&
399 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
400 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
401 		goto out;
402 	}
403 
404 	switch (type) {
405 	case ICMP_REDIRECT:
406 		do_redirect(icmp_skb, sk);
407 		goto out;
408 	case ICMP_SOURCE_QUENCH:
409 		/* Just silently ignore these. */
410 		goto out;
411 	case ICMP_PARAMETERPROB:
412 		err = EPROTO;
413 		break;
414 	case ICMP_DEST_UNREACH:
415 		if (code > NR_ICMP_UNREACH)
416 			goto out;
417 
418 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
419 			tp->mtu_info = info;
420 			if (!sock_owned_by_user(sk))
421 				tcp_v4_mtu_reduced(sk);
422 			else
423 				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
424 			goto out;
425 		}
426 
427 		err = icmp_err_convert[code].errno;
428 		/* check if icmp_skb allows revert of backoff
429 		 * (see draft-zimmermann-tcp-lcd) */
430 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
431 			break;
432 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
433 		    !icsk->icsk_backoff)
434 			break;
435 
436 		if (sock_owned_by_user(sk))
437 			break;
438 
439 		icsk->icsk_backoff--;
440 		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
441 			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
442 		tcp_bound_rto(sk);
443 
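		/* One backoff step was undone above and icsk_rto rebuilt from
		 * srtt (or TCP_TIMEOUT_INIT when no RTT sample exists yet),
		 * shifted by the reduced backoff. Below we re-arm the
		 * retransmit timer with whatever part of that RTO has not yet
		 * elapsed since the head-of-queue skb was sent, or retransmit
		 * immediately if it has already run out.
		 */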
444 		skb = tcp_write_queue_head(sk);
445 		BUG_ON(!skb);
446 
447 		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
448 				tcp_time_stamp - TCP_SKB_CB(skb)->when);
449 
450 		if (remaining) {
451 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
452 						  remaining, TCP_RTO_MAX);
453 		} else {
454 			/* RTO revert clocked out retransmission.
455 			 * Will retransmit now */
456 			tcp_retransmit_timer(sk);
457 		}
458 
459 		break;
460 	case ICMP_TIME_EXCEEDED:
461 		err = EHOSTUNREACH;
462 		break;
463 	default:
464 		goto out;
465 	}
466 
467 	switch (sk->sk_state) {
468 		struct request_sock *req, **prev;
469 	case TCP_LISTEN:
470 		if (sock_owned_by_user(sk))
471 			goto out;
472 
473 		req = inet_csk_search_req(sk, &prev, th->dest,
474 					  iph->daddr, iph->saddr);
475 		if (!req)
476 			goto out;
477 
478 		/* ICMPs are not backlogged, hence we cannot get
479 		   an established socket here.
480 		 */
481 		WARN_ON(req->sk);
482 
483 		if (seq != tcp_rsk(req)->snt_isn) {
484 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
485 			goto out;
486 		}
487 
488 		/*
489 		 * Still in SYN_RECV, just remove it silently.
490 		 * There is no good way to pass the error to the newly
491 		 * created socket, and POSIX does not want network
492 		 * errors returned from accept().
493 		 */
494 		inet_csk_reqsk_queue_drop(sk, req, prev);
495 		goto out;
496 
497 	case TCP_SYN_SENT:
498 	case TCP_SYN_RECV:  /* Cannot happen normally.
499 			       It can, e.g., if SYNs crossed.
500 			     */
501 		if (!sock_owned_by_user(sk)) {
502 			sk->sk_err = err;
503 
504 			sk->sk_error_report(sk);
505 
506 			tcp_done(sk);
507 		} else {
508 			sk->sk_err_soft = err;
509 		}
510 		goto out;
511 	}
512 
513 	/* If we've already connected we will keep trying
514 	 * until we time out, or the user gives up.
515 	 *
516 	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to
517 	 * be considered hard errors (well, FRAG_FAILED too,
518 	 * but it is obsoleted by pmtu discovery).
519 	 *
520 	 * Note that in the modern internet, where routing is unreliable
521 	 * and broken firewalls sit in every dark corner sending random
522 	 * errors ordered by their masters, even these two messages finally
523 	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
524 	 *
525 	 * Now we are in compliance with RFCs.
526 	 *							--ANK (980905)
527 	 */
528 
529 	inet = inet_sk(sk);
530 	if (!sock_owned_by_user(sk) && inet->recverr) {
531 		sk->sk_err = err;
532 		sk->sk_error_report(sk);
533 	} else	{ /* Only an error on timeout */
534 		sk->sk_err_soft = err;
535 	}
536 
537 out:
538 	bh_unlock_sock(sk);
539 	sock_put(sk);
540 }
541 
542 static void __tcp_v4_send_check(struct sk_buff *skb,
543 				__be32 saddr, __be32 daddr)
544 {
545 	struct tcphdr *th = tcp_hdr(skb);
546 
547 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
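		/* Checksum offload (or GSO) will finish the sum: seed
		 * th->check with the pseudo-header checksum and record in
		 * csum_start/csum_offset where the remainder should be
		 * folded in.
		 */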
548 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549 		skb->csum_start = skb_transport_header(skb) - skb->head;
550 		skb->csum_offset = offsetof(struct tcphdr, check);
551 	} else {
552 		th->check = tcp_v4_check(skb->len, saddr, daddr,
553 					 csum_partial(th,
554 						      th->doff << 2,
555 						      skb->csum));
556 	}
557 }
558 
559 /* This routine computes an IPv4 TCP checksum. */
560 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
561 {
562 	const struct inet_sock *inet = inet_sk(sk);
563 
564 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565 }
566 EXPORT_SYMBOL(tcp_v4_send_check);
567 
568 int tcp_v4_gso_send_check(struct sk_buff *skb)
569 {
570 	const struct iphdr *iph;
571 	struct tcphdr *th;
572 
573 	if (!pskb_may_pull(skb, sizeof(*th)))
574 		return -EINVAL;
575 
576 	iph = ip_hdr(skb);
577 	th = tcp_hdr(skb);
578 
579 	th->check = 0;
580 	skb->ip_summed = CHECKSUM_PARTIAL;
581 	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
582 	return 0;
583 }
584 
585 /*
586  *	This routine will send an RST to the other tcp.
587  *
588  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
589  *		      for the reset?
590  *	Answer: if a packet caused an RST, it is not for a socket
591  *		existing in our system; if it is matched to a socket,
592  *		it is just a duplicate segment or a bug in the other side's TCP.
593  *		So we build the reply based only on the parameters that
594  *		arrived with the segment.
595  *	Exception: precedence violation. We do not implement it in any case.
596  */
597 
598 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
599 {
600 	const struct tcphdr *th = tcp_hdr(skb);
601 	struct {
602 		struct tcphdr th;
603 #ifdef CONFIG_TCP_MD5SIG
604 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
605 #endif
606 	} rep;
607 	struct ip_reply_arg arg;
608 #ifdef CONFIG_TCP_MD5SIG
609 	struct tcp_md5sig_key *key;
610 	const __u8 *hash_location = NULL;
611 	unsigned char newhash[16];
612 	int genhash;
613 	struct sock *sk1 = NULL;
614 #endif
615 	struct net *net;
616 
617 	/* Never send a reset in response to a reset. */
618 	if (th->rst)
619 		return;
620 
621 	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
622 		return;
623 
624 	/* Swap the send and the receive. */
625 	memset(&rep, 0, sizeof(rep));
626 	rep.th.dest   = th->source;
627 	rep.th.source = th->dest;
628 	rep.th.doff   = sizeof(struct tcphdr) / 4;
629 	rep.th.rst    = 1;
630 
631 	if (th->ack) {
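	/* Per RFC 793: if the incoming segment carried an ACK, the RST takes
	 * that ACK number as its own sequence number; otherwise the RST keeps
	 * SEQ = 0 and instead ACKs everything the offending segment occupied
	 * (its payload length, plus one each for SYN and FIN).
	 */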
632 		rep.th.seq = th->ack_seq;
633 	} else {
634 		rep.th.ack = 1;
635 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
636 				       skb->len - (th->doff << 2));
637 	}
638 
639 	memset(&arg, 0, sizeof(arg));
640 	arg.iov[0].iov_base = (unsigned char *)&rep;
641 	arg.iov[0].iov_len  = sizeof(rep.th);
642 
643 #ifdef CONFIG_TCP_MD5SIG
644 	hash_location = tcp_parse_md5sig_option(th);
645 	if (!sk && hash_location) {
646 		/*
647 		 * The active side is lost. Try to find the listening socket
648 		 * through the source port, and then find the md5 key through
649 		 * the listening socket. We do not lose any security here:
650 		 * the incoming packet is checked against the md5 hash of the
651 		 * key we find, and no RST is generated if the hash doesn't match.
652 		 */
653 		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
654 					     &tcp_hashinfo, ip_hdr(skb)->daddr,
655 					     ntohs(th->source), inet_iif(skb));
656 		/* don't send rst if it can't find key */
657 		if (!sk1)
658 			return;
659 		rcu_read_lock();
660 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 					&ip_hdr(skb)->saddr, AF_INET);
662 		if (!key)
663 			goto release_sk1;
664 
665 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
666 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
667 			goto release_sk1;
668 	} else {
669 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
670 					     &ip_hdr(skb)->saddr,
671 					     AF_INET) : NULL;
672 	}
673 
674 	if (key) {
675 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
676 				   (TCPOPT_NOP << 16) |
677 				   (TCPOPT_MD5SIG << 8) |
678 				   TCPOLEN_MD5SIG);
679 		/* Update length and the length the header thinks exists */
680 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
681 		rep.th.doff = arg.iov[0].iov_len / 4;
682 
683 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
684 				     key, ip_hdr(skb)->saddr,
685 				     ip_hdr(skb)->daddr, &rep.th);
686 	}
687 #endif
688 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
689 				      ip_hdr(skb)->saddr, /* XXX */
690 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
691 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
692 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
693 	/* When the socket is gone, all binding information is lost and
694 	 * routing might fail in this case. Use the incoming interface (iif)
695 	 * as the outgoing interface (oif) to make sure we can deliver the reply.
696 	 */
697 	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
698 
699 	net = dev_net(skb_dst(skb)->dev);
700 	arg.tos = ip_hdr(skb)->tos;
701 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
702 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
703 
704 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
706 
707 #ifdef CONFIG_TCP_MD5SIG
708 release_sk1:
709 	if (sk1) {
710 		rcu_read_unlock();
711 		sock_put(sk1);
712 	}
713 #endif
714 }
715 
716 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
717    outside of socket context, is certainly ugly. What can I do?
718  */
719 
720 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
721 			    u32 win, u32 ts, int oif,
722 			    struct tcp_md5sig_key *key,
723 			    int reply_flags, u8 tos)
724 {
725 	const struct tcphdr *th = tcp_hdr(skb);
726 	struct {
727 		struct tcphdr th;
728 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
729 #ifdef CONFIG_TCP_MD5SIG
730 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
731 #endif
732 			];
733 	} rep;
734 	struct ip_reply_arg arg;
735 	struct net *net = dev_net(skb_dst(skb)->dev);
736 
737 	memset(&rep.th, 0, sizeof(struct tcphdr));
738 	memset(&arg, 0, sizeof(arg));
739 
740 	arg.iov[0].iov_base = (unsigned char *)&rep;
741 	arg.iov[0].iov_len  = sizeof(rep.th);
742 	if (ts) {
743 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
744 				   (TCPOPT_TIMESTAMP << 8) |
745 				   TCPOLEN_TIMESTAMP);
746 		rep.opt[1] = htonl(tcp_time_stamp);
747 		rep.opt[2] = htonl(ts);
748 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
749 	}
750 
751 	/* Swap the send and the receive. */
752 	rep.th.dest    = th->source;
753 	rep.th.source  = th->dest;
754 	rep.th.doff    = arg.iov[0].iov_len / 4;
755 	rep.th.seq     = htonl(seq);
756 	rep.th.ack_seq = htonl(ack);
757 	rep.th.ack     = 1;
758 	rep.th.window  = htons(win);
759 
760 #ifdef CONFIG_TCP_MD5SIG
761 	if (key) {
762 		int offset = (ts) ? 3 : 0;
763 
764 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
765 					  (TCPOPT_NOP << 16) |
766 					  (TCPOPT_MD5SIG << 8) |
767 					  TCPOLEN_MD5SIG);
768 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
769 		rep.th.doff = arg.iov[0].iov_len/4;
770 
771 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
772 				    key, ip_hdr(skb)->saddr,
773 				    ip_hdr(skb)->daddr, &rep.th);
774 	}
775 #endif
776 	arg.flags = reply_flags;
777 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
778 				      ip_hdr(skb)->saddr, /* XXX */
779 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
780 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
781 	if (oif)
782 		arg.bound_dev_if = oif;
783 	arg.tos = tos;
784 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
785 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
786 
787 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
788 }
789 
790 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
791 {
792 	struct inet_timewait_sock *tw = inet_twsk(sk);
793 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
794 
795 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
796 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
797 			tcptw->tw_ts_recent,
798 			tw->tw_bound_dev_if,
799 			tcp_twsk_md5_key(tcptw),
800 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
801 			tw->tw_tos
802 			);
803 
804 	inet_twsk_put(tw);
805 }
806 
807 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
808 				  struct request_sock *req)
809 {
810 	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
811 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
812 			req->ts_recent,
813 			0,
814 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
815 					  AF_INET),
816 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 			ip_hdr(skb)->tos);
818 }
819 
820 /*
821  *	Send a SYN-ACK after having received a SYN.
822  *	This still operates on a request_sock only, not on a big
823  *	socket.
824  */
825 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 			      struct request_sock *req,
827 			      struct request_values *rvp,
828 			      u16 queue_mapping,
829 			      bool nocache)
830 {
831 	const struct inet_request_sock *ireq = inet_rsk(req);
832 	struct flowi4 fl4;
833 	int err = -1;
834 	struct sk_buff *skb;
835 
836 	/* First, grab a route. */
837 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
838 		return -1;
839 
840 	skb = tcp_make_synack(sk, dst, req, rvp);
841 
842 	if (skb) {
843 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
844 
845 		skb_set_queue_mapping(skb, queue_mapping);
846 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
847 					    ireq->rmt_addr,
848 					    ireq->opt);
849 		err = net_xmit_eval(err);
850 	}
851 
852 	return err;
853 }
854 
855 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
856 			      struct request_values *rvp)
857 {
858 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
860 }
861 
862 /*
863  *	IPv4 request_sock destructor.
864  */
865 static void tcp_v4_reqsk_destructor(struct request_sock *req)
866 {
867 	kfree(inet_rsk(req)->opt);
868 }
869 
870 /*
871  * Return true if a syncookie should be sent
872  */
873 bool tcp_syn_flood_action(struct sock *sk,
874 			 const struct sk_buff *skb,
875 			 const char *proto)
876 {
877 	const char *msg = "Dropping request";
878 	bool want_cookie = false;
879 	struct listen_sock *lopt;
880 
881 
882 
883 #ifdef CONFIG_SYN_COOKIES
884 	if (sysctl_tcp_syncookies) {
885 		msg = "Sending cookies";
886 		want_cookie = true;
887 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
888 	} else
889 #endif
890 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
891 
892 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
893 	if (!lopt->synflood_warned) {
894 		lopt->synflood_warned = 1;
895 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
896 			proto, ntohs(tcp_hdr(skb)->dest), msg);
897 	}
898 	return want_cookie;
899 }
900 EXPORT_SYMBOL(tcp_syn_flood_action);
901 
902 /*
903  * Save and compile IPv4 options into the request_sock if needed.
904  */
905 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
906 						  struct sk_buff *skb)
907 {
908 	const struct ip_options *opt = &(IPCB(skb)->opt);
909 	struct ip_options_rcu *dopt = NULL;
910 
911 	if (opt && opt->optlen) {
912 		int opt_size = sizeof(*dopt) + opt->optlen;
913 
914 		dopt = kmalloc(opt_size, GFP_ATOMIC);
915 		if (dopt) {
916 			if (ip_options_echo(&dopt->opt, skb)) {
917 				kfree(dopt);
918 				dopt = NULL;
919 			}
920 		}
921 	}
922 	return dopt;
923 }
924 
925 #ifdef CONFIG_TCP_MD5SIG
926 /*
927  * RFC2385 MD5 checksumming requires a mapping of
928  * IP address->MD5 Key.
929  * We need to maintain these in the sk structure.
930  */
931 
932 /* Find the Key structure for an address.  */
933 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
934 					 const union tcp_md5_addr *addr,
935 					 int family)
936 {
937 	struct tcp_sock *tp = tcp_sk(sk);
938 	struct tcp_md5sig_key *key;
939 	struct hlist_node *pos;
940 	unsigned int size = sizeof(struct in_addr);
941 	struct tcp_md5sig_info *md5sig;
942 
943 	/* caller either holds rcu_read_lock() or socket lock */
944 	md5sig = rcu_dereference_check(tp->md5sig_info,
945 				       sock_owned_by_user(sk) ||
946 				       lockdep_is_held(&sk->sk_lock.slock));
947 	if (!md5sig)
948 		return NULL;
949 #if IS_ENABLED(CONFIG_IPV6)
950 	if (family == AF_INET6)
951 		size = sizeof(struct in6_addr);
952 #endif
953 	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
954 		if (key->family != family)
955 			continue;
956 		if (!memcmp(&key->addr, addr, size))
957 			return key;
958 	}
959 	return NULL;
960 }
961 EXPORT_SYMBOL(tcp_md5_do_lookup);
962 
963 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
964 					 struct sock *addr_sk)
965 {
966 	union tcp_md5_addr *addr;
967 
968 	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
969 	return tcp_md5_do_lookup(sk, addr, AF_INET);
970 }
971 EXPORT_SYMBOL(tcp_v4_md5_lookup);
972 
973 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
974 						      struct request_sock *req)
975 {
976 	union tcp_md5_addr *addr;
977 
978 	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
979 	return tcp_md5_do_lookup(sk, addr, AF_INET);
980 }
981 
982 /* This can be called on a newly created socket, from other files */
983 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
984 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
985 {
986 	/* Add Key to the list */
987 	struct tcp_md5sig_key *key;
988 	struct tcp_sock *tp = tcp_sk(sk);
989 	struct tcp_md5sig_info *md5sig;
990 
991 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
992 	if (key) {
993 		/* Pre-existing entry - just update that one. */
994 		memcpy(key->key, newkey, newkeylen);
995 		key->keylen = newkeylen;
996 		return 0;
997 	}
998 
999 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1000 					   sock_owned_by_user(sk));
1001 	if (!md5sig) {
1002 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1003 		if (!md5sig)
1004 			return -ENOMEM;
1005 
1006 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1007 		INIT_HLIST_HEAD(&md5sig->head);
1008 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1009 	}
1010 
1011 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1012 	if (!key)
1013 		return -ENOMEM;
1014 	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1015 		sock_kfree_s(sk, key, sizeof(*key));
1016 		return -ENOMEM;
1017 	}
1018 
1019 	memcpy(key->key, newkey, newkeylen);
1020 	key->keylen = newkeylen;
1021 	key->family = family;
1022 	memcpy(&key->addr, addr,
1023 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1024 				      sizeof(struct in_addr));
1025 	hlist_add_head_rcu(&key->node, &md5sig->head);
1026 	return 0;
1027 }
1028 EXPORT_SYMBOL(tcp_md5_do_add);
1029 
1030 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1031 {
1032 	struct tcp_sock *tp = tcp_sk(sk);
1033 	struct tcp_md5sig_key *key;
1034 	struct tcp_md5sig_info *md5sig;
1035 
1036 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1037 	if (!key)
1038 		return -ENOENT;
1039 	hlist_del_rcu(&key->node);
1040 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1041 	kfree_rcu(key, rcu);
1042 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1043 					   sock_owned_by_user(sk));
1044 	if (hlist_empty(&md5sig->head))
1045 		tcp_free_md5sig_pool();
1046 	return 0;
1047 }
1048 EXPORT_SYMBOL(tcp_md5_do_del);
1049 
1050 void tcp_clear_md5_list(struct sock *sk)
1051 {
1052 	struct tcp_sock *tp = tcp_sk(sk);
1053 	struct tcp_md5sig_key *key;
1054 	struct hlist_node *pos, *n;
1055 	struct tcp_md5sig_info *md5sig;
1056 
1057 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1058 
1059 	if (!hlist_empty(&md5sig->head))
1060 		tcp_free_md5sig_pool();
1061 	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1062 		hlist_del_rcu(&key->node);
1063 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1064 		kfree_rcu(key, rcu);
1065 	}
1066 }
1067 
1068 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1069 				 int optlen)
1070 {
1071 	struct tcp_md5sig cmd;
1072 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1073 
1074 	if (optlen < sizeof(cmd))
1075 		return -EINVAL;
1076 
1077 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1078 		return -EFAULT;
1079 
1080 	if (sin->sin_family != AF_INET)
1081 		return -EINVAL;
1082 
1083 	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1084 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1085 				      AF_INET);
1086 
1087 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1088 		return -EINVAL;
1089 
1090 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1091 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1092 			      GFP_KERNEL);
1093 }
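
/* A minimal userspace sketch of how this option is typically driven (error
 * handling omitted; the field names follow the struct tcp_md5sig UAPI
 * definition, and the peer address is a placeholder):
 *
 *	struct tcp_md5sig md5 = { };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	a->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Calling it again with tcpm_keylen == 0 for the same address removes the
 * key via tcp_md5_do_del() above.
 */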
1094 
1095 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1096 					__be32 daddr, __be32 saddr, int nbytes)
1097 {
1098 	struct tcp4_pseudohdr *bp;
1099 	struct scatterlist sg;
1100 
1101 	bp = &hp->md5_blk.ip4;
1102 
1103 	/*
1104 	 * 1. the TCP pseudo-header (in the order: source IP address,
1105 	 * destination IP address, zero-padded protocol number, and
1106 	 * segment length)
1107 	 */
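	/* The remaining RFC 2385 inputs - the TCP header with its checksum
	 * zeroed, the segment payload (when present), and the key itself -
	 * are hashed by the callers below via tcp_md5_hash_header(),
	 * tcp_md5_hash_skb_data() and tcp_md5_hash_key().
	 */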
1108 	bp->saddr = saddr;
1109 	bp->daddr = daddr;
1110 	bp->pad = 0;
1111 	bp->protocol = IPPROTO_TCP;
1112 	bp->len = cpu_to_be16(nbytes);
1113 
1114 	sg_init_one(&sg, bp, sizeof(*bp));
1115 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1116 }
1117 
1118 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1119 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1120 {
1121 	struct tcp_md5sig_pool *hp;
1122 	struct hash_desc *desc;
1123 
1124 	hp = tcp_get_md5sig_pool();
1125 	if (!hp)
1126 		goto clear_hash_noput;
1127 	desc = &hp->md5_desc;
1128 
1129 	if (crypto_hash_init(desc))
1130 		goto clear_hash;
1131 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1132 		goto clear_hash;
1133 	if (tcp_md5_hash_header(hp, th))
1134 		goto clear_hash;
1135 	if (tcp_md5_hash_key(hp, key))
1136 		goto clear_hash;
1137 	if (crypto_hash_final(desc, md5_hash))
1138 		goto clear_hash;
1139 
1140 	tcp_put_md5sig_pool();
1141 	return 0;
1142 
1143 clear_hash:
1144 	tcp_put_md5sig_pool();
1145 clear_hash_noput:
1146 	memset(md5_hash, 0, 16);
1147 	return 1;
1148 }
1149 
1150 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1151 			const struct sock *sk, const struct request_sock *req,
1152 			const struct sk_buff *skb)
1153 {
1154 	struct tcp_md5sig_pool *hp;
1155 	struct hash_desc *desc;
1156 	const struct tcphdr *th = tcp_hdr(skb);
1157 	__be32 saddr, daddr;
1158 
1159 	if (sk) {
1160 		saddr = inet_sk(sk)->inet_saddr;
1161 		daddr = inet_sk(sk)->inet_daddr;
1162 	} else if (req) {
1163 		saddr = inet_rsk(req)->loc_addr;
1164 		daddr = inet_rsk(req)->rmt_addr;
1165 	} else {
1166 		const struct iphdr *iph = ip_hdr(skb);
1167 		saddr = iph->saddr;
1168 		daddr = iph->daddr;
1169 	}
1170 
1171 	hp = tcp_get_md5sig_pool();
1172 	if (!hp)
1173 		goto clear_hash_noput;
1174 	desc = &hp->md5_desc;
1175 
1176 	if (crypto_hash_init(desc))
1177 		goto clear_hash;
1178 
1179 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1180 		goto clear_hash;
1181 	if (tcp_md5_hash_header(hp, th))
1182 		goto clear_hash;
1183 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 		goto clear_hash;
1185 	if (tcp_md5_hash_key(hp, key))
1186 		goto clear_hash;
1187 	if (crypto_hash_final(desc, md5_hash))
1188 		goto clear_hash;
1189 
1190 	tcp_put_md5sig_pool();
1191 	return 0;
1192 
1193 clear_hash:
1194 	tcp_put_md5sig_pool();
1195 clear_hash_noput:
1196 	memset(md5_hash, 0, 16);
1197 	return 1;
1198 }
1199 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1200 
1201 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1202 {
1203 	/*
1204 	 * This gets called for each TCP segment that arrives
1205 	 * so we want to be efficient.
1206 	 * We have 3 drop cases:
1207 	 * o No MD5 hash and one expected.
1208 	 * o MD5 hash and we're not expecting one.
1209 	 * o MD5 hash and it's wrong.
1210 	 */
1211 	const __u8 *hash_location = NULL;
1212 	struct tcp_md5sig_key *hash_expected;
1213 	const struct iphdr *iph = ip_hdr(skb);
1214 	const struct tcphdr *th = tcp_hdr(skb);
1215 	int genhash;
1216 	unsigned char newhash[16];
1217 
1218 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1219 					  AF_INET);
1220 	hash_location = tcp_parse_md5sig_option(th);
1221 
1222 	/* We've parsed the options - do we have a hash? */
1223 	if (!hash_expected && !hash_location)
1224 		return false;
1225 
1226 	if (hash_expected && !hash_location) {
1227 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1228 		return true;
1229 	}
1230 
1231 	if (!hash_expected && hash_location) {
1232 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1233 		return true;
1234 	}
1235 
1236 	/* Okay, so we have both hash_expected and hash_location -
1237 	 * we need to calculate the hash and compare.
1238 	 */
1239 	genhash = tcp_v4_md5_hash_skb(newhash,
1240 				      hash_expected,
1241 				      NULL, NULL, skb);
1242 
1243 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1244 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1245 				     &iph->saddr, ntohs(th->source),
1246 				     &iph->daddr, ntohs(th->dest),
1247 				     genhash ? " tcp_v4_calc_md5_hash failed"
1248 				     : "");
1249 		return true;
1250 	}
1251 	return false;
1252 }
1253 
1254 #endif
1255 
1256 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1257 	.family		=	PF_INET,
1258 	.obj_size	=	sizeof(struct tcp_request_sock),
1259 	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1260 	.send_ack	=	tcp_v4_reqsk_send_ack,
1261 	.destructor	=	tcp_v4_reqsk_destructor,
1262 	.send_reset	=	tcp_v4_send_reset,
1263 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1264 };
1265 
1266 #ifdef CONFIG_TCP_MD5SIG
1267 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1268 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1269 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1270 };
1271 #endif
1272 
1273 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1274 {
1275 	struct tcp_extend_values tmp_ext;
1276 	struct tcp_options_received tmp_opt;
1277 	const u8 *hash_location;
1278 	struct request_sock *req;
1279 	struct inet_request_sock *ireq;
1280 	struct tcp_sock *tp = tcp_sk(sk);
1281 	struct dst_entry *dst = NULL;
1282 	__be32 saddr = ip_hdr(skb)->saddr;
1283 	__be32 daddr = ip_hdr(skb)->daddr;
1284 	__u32 isn = TCP_SKB_CB(skb)->when;
1285 	bool want_cookie = false;
1286 
1287 	/* Never answer SYNs sent to broadcast or multicast addresses */
1288 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1289 		goto drop;
1290 
1291 	/* TW buckets are converted to open requests without
1292 	 * limitation; they conserve resources and the peer is
1293 	 * evidently a real one.
1294 	 */
1295 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1296 		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1297 		if (!want_cookie)
1298 			goto drop;
1299 	}
1300 
1301 	/* The accept backlog is full. If we have already queued enough
1302 	 * warm entries in the syn queue, drop the request. That is better
1303 	 * than clogging the syn queue with openreqs with exponentially
1304 	 * increasing timeouts.
1305 	 */
1306 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1307 		goto drop;
1308 
1309 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1310 	if (!req)
1311 		goto drop;
1312 
1313 #ifdef CONFIG_TCP_MD5SIG
1314 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1315 #endif
1316 
1317 	tcp_clear_options(&tmp_opt);
1318 	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1319 	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1320 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
1321 
1322 	if (tmp_opt.cookie_plus > 0 &&
1323 	    tmp_opt.saw_tstamp &&
1324 	    !tp->rx_opt.cookie_out_never &&
1325 	    (sysctl_tcp_cookie_size > 0 ||
1326 	     (tp->cookie_values != NULL &&
1327 	      tp->cookie_values->cookie_desired > 0))) {
1328 		u8 *c;
1329 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1330 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1331 
1332 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1333 			goto drop_and_release;
1334 
1335 		/* Secret recipe starts with IP addresses */
1336 		*mess++ ^= (__force u32)daddr;
1337 		*mess++ ^= (__force u32)saddr;
1338 
1339 		/* plus variable length Initiator Cookie */
1340 		c = (u8 *)mess;
1341 		while (l-- > 0)
1342 			*c++ ^= *hash_location++;
1343 
1344 		want_cookie = false;	/* not our kind of cookie */
1345 		tmp_ext.cookie_out_never = 0; /* false */
1346 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1347 	} else if (!tp->rx_opt.cookie_in_always) {
1348 		/* redundant indications, but ensure initialization. */
1349 		tmp_ext.cookie_out_never = 1; /* true */
1350 		tmp_ext.cookie_plus = 0;
1351 	} else {
1352 		goto drop_and_release;
1353 	}
1354 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1355 
1356 	if (want_cookie && !tmp_opt.saw_tstamp)
1357 		tcp_clear_options(&tmp_opt);
1358 
1359 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1360 	tcp_openreq_init(req, &tmp_opt, skb);
1361 
1362 	ireq = inet_rsk(req);
1363 	ireq->loc_addr = daddr;
1364 	ireq->rmt_addr = saddr;
1365 	ireq->no_srccheck = inet_sk(sk)->transparent;
1366 	ireq->opt = tcp_v4_save_options(sk, skb);
1367 
1368 	if (security_inet_conn_request(sk, skb, req))
1369 		goto drop_and_free;
1370 
1371 	if (!want_cookie || tmp_opt.tstamp_ok)
1372 		TCP_ECN_create_request(req, skb);
1373 
1374 	if (want_cookie) {
1375 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376 		req->cookie_ts = tmp_opt.tstamp_ok;
1377 	} else if (!isn) {
1378 		struct flowi4 fl4;
1379 
1380 		/* VJ's idea. We save the last timestamp seen
1381 		 * from the destination in the peer table when entering
1382 		 * TIME-WAIT state, and check against it before
1383 		 * accepting a new connection request.
1384 		 *
1385 		 * If "isn" is not zero, this request hit a live
1386 		 * timewait bucket, so all the necessary checks
1387 		 * were made in the function processing the timewait state.
1388 		 */
1389 		if (tmp_opt.saw_tstamp &&
1390 		    tcp_death_row.sysctl_tw_recycle &&
1391 		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1392 		    fl4.daddr == saddr) {
1393 			if (!tcp_peer_is_proven(req, dst, true)) {
1394 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1395 				goto drop_and_release;
1396 			}
1397 		}
1398 		/* Kill the following clause, if you dislike this way. */
1399 		else if (!sysctl_tcp_syncookies &&
1400 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1401 			  (sysctl_max_syn_backlog >> 2)) &&
1402 			 !tcp_peer_is_proven(req, dst, false)) {
1403 			/* Without syncookies, the last quarter of the
1404 			 * backlog is kept for destinations
1405 			 * proven to be alive.
1406 			 * It means that we continue to communicate only
1407 			 * with destinations already remembered
1408 			 * at the moment the synflood started.
1409 			 */
1410 			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1411 				       &saddr, ntohs(tcp_hdr(skb)->source));
1412 			goto drop_and_release;
1413 		}
1414 
1415 		isn = tcp_v4_init_sequence(skb);
1416 	}
1417 	tcp_rsk(req)->snt_isn = isn;
1418 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1419 
1420 	if (tcp_v4_send_synack(sk, dst, req,
1421 			       (struct request_values *)&tmp_ext,
1422 			       skb_get_queue_mapping(skb),
1423 			       want_cookie) ||
1424 	    want_cookie)
1425 		goto drop_and_free;
1426 
1427 	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1428 	return 0;
1429 
1430 drop_and_release:
1431 	dst_release(dst);
1432 drop_and_free:
1433 	reqsk_free(req);
1434 drop:
1435 	return 0;
1436 }
1437 EXPORT_SYMBOL(tcp_v4_conn_request);
1438 
1439 
1440 /*
1441  * The three-way handshake has completed - we got a valid ACK to our SYN-ACK -
1442  * now create the new socket.
1443  */
1444 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1445 				  struct request_sock *req,
1446 				  struct dst_entry *dst)
1447 {
1448 	struct inet_request_sock *ireq;
1449 	struct inet_sock *newinet;
1450 	struct tcp_sock *newtp;
1451 	struct sock *newsk;
1452 #ifdef CONFIG_TCP_MD5SIG
1453 	struct tcp_md5sig_key *key;
1454 #endif
1455 	struct ip_options_rcu *inet_opt;
1456 
1457 	if (sk_acceptq_is_full(sk))
1458 		goto exit_overflow;
1459 
1460 	newsk = tcp_create_openreq_child(sk, req, skb);
1461 	if (!newsk)
1462 		goto exit_nonewsk;
1463 
1464 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1465 
1466 	newtp		      = tcp_sk(newsk);
1467 	newinet		      = inet_sk(newsk);
1468 	ireq		      = inet_rsk(req);
1469 	newinet->inet_daddr   = ireq->rmt_addr;
1470 	newinet->inet_rcv_saddr = ireq->loc_addr;
1471 	newinet->inet_saddr	      = ireq->loc_addr;
1472 	inet_opt	      = ireq->opt;
1473 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1474 	ireq->opt	      = NULL;
1475 	newinet->mc_index     = inet_iif(skb);
1476 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1477 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1478 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1479 	if (inet_opt)
1480 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1481 	newinet->inet_id = newtp->write_seq ^ jiffies;
1482 
1483 	if (!dst) {
1484 		dst = inet_csk_route_child_sock(sk, newsk, req);
1485 		if (!dst)
1486 			goto put_and_exit;
1487 	} else {
1488 		/* syncookie case : see end of cookie_v4_check() */
1489 	}
1490 	sk_setup_caps(newsk, dst);
1491 
1492 	tcp_mtup_init(newsk);
1493 	tcp_sync_mss(newsk, dst_mtu(dst));
1494 	newtp->advmss = dst_metric_advmss(dst);
1495 	if (tcp_sk(sk)->rx_opt.user_mss &&
1496 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1497 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1498 
1499 	tcp_initialize_rcv_mss(newsk);
1500 	if (tcp_rsk(req)->snt_synack)
1501 		tcp_valid_rtt_meas(newsk,
1502 		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1503 	newtp->total_retrans = req->retrans;
1504 
1505 #ifdef CONFIG_TCP_MD5SIG
1506 	/* Copy over the MD5 key from the original socket */
1507 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1508 				AF_INET);
1509 	if (key != NULL) {
1510 		/*
1511 		 * We're using one, so create a matching key
1512 		 * on the newsk structure. If we fail to get
1513 		 * memory, then we end up not copying the key
1514 		 * across. Shucks.
1515 		 */
1516 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1517 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1518 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1519 	}
1520 #endif
1521 
1522 	if (__inet_inherit_port(sk, newsk) < 0)
1523 		goto put_and_exit;
1524 	__inet_hash_nolisten(newsk, NULL);
1525 
1526 	return newsk;
1527 
1528 exit_overflow:
1529 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1530 exit_nonewsk:
1531 	dst_release(dst);
1532 exit:
1533 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1534 	return NULL;
1535 put_and_exit:
1536 	tcp_clear_xmit_timers(newsk);
1537 	tcp_cleanup_congestion_control(newsk);
1538 	bh_unlock_sock(newsk);
1539 	sock_put(newsk);
1540 	goto exit;
1541 }
1542 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1543 
1544 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1545 {
1546 	struct tcphdr *th = tcp_hdr(skb);
1547 	const struct iphdr *iph = ip_hdr(skb);
1548 	struct sock *nsk;
1549 	struct request_sock **prev;
1550 	/* Find possible connection requests. */
1551 	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1552 						       iph->saddr, iph->daddr);
1553 	if (req)
1554 		return tcp_check_req(sk, skb, req, prev);
1555 
1556 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1557 			th->source, iph->daddr, th->dest, inet_iif(skb));
1558 
1559 	if (nsk) {
1560 		if (nsk->sk_state != TCP_TIME_WAIT) {
1561 			bh_lock_sock(nsk);
1562 			return nsk;
1563 		}
1564 		inet_twsk_put(inet_twsk(nsk));
1565 		return NULL;
1566 	}
1567 
1568 #ifdef CONFIG_SYN_COOKIES
1569 	if (!th->syn)
1570 		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1571 #endif
1572 	return sk;
1573 }
1574 
1575 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1576 {
1577 	const struct iphdr *iph = ip_hdr(skb);
1578 
1579 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1580 		if (!tcp_v4_check(skb->len, iph->saddr,
1581 				  iph->daddr, skb->csum)) {
1582 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 			return 0;
1584 		}
1585 	}
1586 
1587 	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1588 				       skb->len, IPPROTO_TCP, 0);
1589 
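	/* For short segments it is cheap enough to verify the full checksum
	 * right away; longer ones leave skb->csum primed with the
	 * pseudo-header sum so that a later copy-and-checksum or
	 * tcp_checksum_complete() call can finish the job. The 76-byte
	 * cutoff is a long-standing heuristic.
	 */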
1590 	if (skb->len <= 76) {
1591 		return __skb_checksum_complete(skb);
1592 	}
1593 	return 0;
1594 }
1595 
1596 
1597 /* The socket must have its spinlock held when we get
1598  * here.
1599  *
1600  * We have a potential double-lock case here, so even when
1601  * doing backlog processing we use the BH locking scheme.
1602  * This is because we cannot sleep with the original spinlock
1603  * held.
1604  */
1605 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1606 {
1607 	struct sock *rsk;
1608 #ifdef CONFIG_TCP_MD5SIG
1609 	/*
1610 	 * We really want to reject the packet as early as possible
1611 	 * if:
1612 	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1613 	 *  o There is an MD5 option and we're not expecting one
1614 	 */
1615 	if (tcp_v4_inbound_md5_hash(sk, skb))
1616 		goto discard;
1617 #endif
1618 
1619 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1620 		struct dst_entry *dst = sk->sk_rx_dst;
1621 
1622 		sock_rps_save_rxhash(sk, skb);
1623 		if (dst) {
1624 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1625 			    dst->ops->check(dst, 0) == NULL) {
1626 				dst_release(dst);
1627 				sk->sk_rx_dst = NULL;
1628 			}
1629 		}
1630 		if (unlikely(sk->sk_rx_dst == NULL))
1631 			inet_sk_rx_dst_set(sk, skb);
1632 
1633 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1634 			rsk = sk;
1635 			goto reset;
1636 		}
1637 		return 0;
1638 	}
1639 
1640 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1641 		goto csum_err;
1642 
1643 	if (sk->sk_state == TCP_LISTEN) {
1644 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1645 		if (!nsk)
1646 			goto discard;
1647 
1648 		if (nsk != sk) {
1649 			sock_rps_save_rxhash(nsk, skb);
1650 			if (tcp_child_process(sk, nsk, skb)) {
1651 				rsk = nsk;
1652 				goto reset;
1653 			}
1654 			return 0;
1655 		}
1656 	} else
1657 		sock_rps_save_rxhash(sk, skb);
1658 
1659 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1660 		rsk = sk;
1661 		goto reset;
1662 	}
1663 	return 0;
1664 
1665 reset:
1666 	tcp_v4_send_reset(rsk, skb);
1667 discard:
1668 	kfree_skb(skb);
1669 	/* Be careful here. If this function gets more complicated and
1670 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1671 	 * might be destroyed here. This current version compiles correctly,
1672 	 * but you have been warned.
1673 	 */
1674 	return 0;
1675 
1676 csum_err:
1677 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1678 	goto discard;
1679 }
1680 EXPORT_SYMBOL(tcp_v4_do_rcv);
1681 
1682 void tcp_v4_early_demux(struct sk_buff *skb)
1683 {
1684 	struct net *net = dev_net(skb->dev);
1685 	const struct iphdr *iph;
1686 	const struct tcphdr *th;
1687 	struct sock *sk;
1688 
1689 	if (skb->pkt_type != PACKET_HOST)
1690 		return;
1691 
1692 	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1693 		return;
1694 
1695 	iph = ip_hdr(skb);
1696 	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1697 
1698 	if (th->doff < sizeof(struct tcphdr) / 4)
1699 		return;
1700 
1701 	sk = __inet_lookup_established(net, &tcp_hashinfo,
1702 				       iph->saddr, th->source,
1703 				       iph->daddr, ntohs(th->dest),
1704 				       skb->skb_iif);
1705 	if (sk) {
1706 		skb->sk = sk;
1707 		skb->destructor = sock_edemux;
1708 		if (sk->sk_state != TCP_TIME_WAIT) {
1709 			struct dst_entry *dst = sk->sk_rx_dst;
1710 
1711 			if (dst)
1712 				dst = dst_check(dst, 0);
1713 			if (dst &&
1714 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1715 				skb_dst_set_noref(skb, dst);
1716 		}
1717 	}
1718 }
1719 
1720 /*
1721  *	From tcp_input.c
1722  */
1723 
1724 int tcp_v4_rcv(struct sk_buff *skb)
1725 {
1726 	const struct iphdr *iph;
1727 	const struct tcphdr *th;
1728 	struct sock *sk;
1729 	int ret;
1730 	struct net *net = dev_net(skb->dev);
1731 
1732 	if (skb->pkt_type != PACKET_HOST)
1733 		goto discard_it;
1734 
1735 	/* Count it even if it's bad */
1736 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1737 
1738 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1739 		goto discard_it;
1740 
1741 	th = tcp_hdr(skb);
1742 
1743 	if (th->doff < sizeof(struct tcphdr) / 4)
1744 		goto bad_packet;
1745 	if (!pskb_may_pull(skb, th->doff * 4))
1746 		goto discard_it;
1747 
1748 	/* An explanation is required here, I think.
1749 	 * Packet length and doff are validated by header prediction,
1750 	 * provided the case of th->doff == 0 is eliminated.
1751 	 * So, we defer the checks. */
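	/* The deferred verification is done later, e.g. by
	 * tcp_checksum_complete() in tcp_v4_do_rcv() or in the
	 * no_tcp_socket/do_time_wait paths below, once the segment is known
	 * to be worth the work.
	 */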
1752 	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1753 		goto bad_packet;
1754 
1755 	th = tcp_hdr(skb);
1756 	iph = ip_hdr(skb);
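	/* end_seq accounts for every sequence number the segment occupies:
	 * its payload bytes plus one each for SYN and FIN, which consume
	 * sequence space of their own.
	 */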
1757 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1758 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1759 				    skb->len - th->doff * 4);
1760 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1761 	TCP_SKB_CB(skb)->when	 = 0;
1762 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1763 	TCP_SKB_CB(skb)->sacked	 = 0;
1764 
1765 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1766 	if (!sk)
1767 		goto no_tcp_socket;
1768 
1769 process:
1770 	if (sk->sk_state == TCP_TIME_WAIT)
1771 		goto do_time_wait;
1772 
1773 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1774 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1775 		goto discard_and_relse;
1776 	}
1777 
1778 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1779 		goto discard_and_relse;
1780 	nf_reset(skb);
1781 
1782 	if (sk_filter(sk, skb))
1783 		goto discard_and_relse;
1784 
1785 	skb->dev = NULL;
1786 
1787 	bh_lock_sock_nested(sk);
1788 	ret = 0;
1789 	if (!sock_owned_by_user(sk)) {
1790 #ifdef CONFIG_NET_DMA
1791 		struct tcp_sock *tp = tcp_sk(sk);
1792 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1793 			tp->ucopy.dma_chan = net_dma_find_channel();
1794 		if (tp->ucopy.dma_chan)
1795 			ret = tcp_v4_do_rcv(sk, skb);
1796 		else
1797 #endif
1798 		{
1799 			if (!tcp_prequeue(sk, skb))
1800 				ret = tcp_v4_do_rcv(sk, skb);
1801 		}
1802 	} else if (unlikely(sk_add_backlog(sk, skb,
1803 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1804 		bh_unlock_sock(sk);
1805 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1806 		goto discard_and_relse;
1807 	}
1808 	bh_unlock_sock(sk);
1809 
1810 	sock_put(sk);
1811 
1812 	return ret;
1813 
1814 no_tcp_socket:
1815 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1816 		goto discard_it;
1817 
1818 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1819 bad_packet:
1820 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1821 	} else {
1822 		tcp_v4_send_reset(NULL, skb);
1823 	}
1824 
1825 discard_it:
1826 	/* Discard frame. */
1827 	kfree_skb(skb);
1828 	return 0;
1829 
1830 discard_and_relse:
1831 	sock_put(sk);
1832 	goto discard_it;
1833 
1834 do_time_wait:
1835 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1836 		inet_twsk_put(inet_twsk(sk));
1837 		goto discard_it;
1838 	}
1839 
1840 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1841 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1842 		inet_twsk_put(inet_twsk(sk));
1843 		goto discard_it;
1844 	}
1845 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1846 	case TCP_TW_SYN: {
1847 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1848 							&tcp_hashinfo,
1849 							iph->daddr, th->dest,
1850 							inet_iif(skb));
1851 		if (sk2) {
1852 			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1853 			inet_twsk_put(inet_twsk(sk));
1854 			sk = sk2;
1855 			goto process;
1856 		}
1857 		/* Fall through to ACK */
1858 	}
1859 	case TCP_TW_ACK:
1860 		tcp_v4_timewait_ack(sk, skb);
1861 		break;
1862 	case TCP_TW_RST:
1863 		goto no_tcp_socket;
1864 	case TCP_TW_SUCCESS:;
1865 	}
1866 	goto discard_it;
1867 }
1868 
1869 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1870 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1871 	.twsk_unique	= tcp_twsk_unique,
1872 	.twsk_destructor= tcp_twsk_destructor,
1873 };
1874 
1875 const struct inet_connection_sock_af_ops ipv4_specific = {
1876 	.queue_xmit	   = ip_queue_xmit,
1877 	.send_check	   = tcp_v4_send_check,
1878 	.rebuild_header	   = inet_sk_rebuild_header,
1879 	.conn_request	   = tcp_v4_conn_request,
1880 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1881 	.net_header_len	   = sizeof(struct iphdr),
1882 	.setsockopt	   = ip_setsockopt,
1883 	.getsockopt	   = ip_getsockopt,
1884 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1885 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1886 	.bind_conflict	   = inet_csk_bind_conflict,
1887 #ifdef CONFIG_COMPAT
1888 	.compat_setsockopt = compat_ip_setsockopt,
1889 	.compat_getsockopt = compat_ip_getsockopt,
1890 #endif
1891 };
1892 EXPORT_SYMBOL(ipv4_specific);
1893 
1894 #ifdef CONFIG_TCP_MD5SIG
1895 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1896 	.md5_lookup		= tcp_v4_md5_lookup,
1897 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1898 	.md5_parse		= tcp_v4_parse_md5_keys,
1899 };
1900 #endif
1901 
1902 /* NOTE: A lot of fields are set to zero explicitly by the call to
1903  *       sk_alloc(), so they need not be initialized here.
1904  */
1905 static int tcp_v4_init_sock(struct sock *sk)
1906 {
1907 	struct inet_connection_sock *icsk = inet_csk(sk);
1908 
1909 	tcp_init_sock(sk);
1910 
1911 	icsk->icsk_af_ops = &ipv4_specific;
1912 
1913 #ifdef CONFIG_TCP_MD5SIG
1914 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1915 #endif
1916 
1917 	return 0;
1918 }
1919 
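/*
 * Final protocol-level teardown of an IPv4 TCP socket, reached through
 * sk->sk_prot->destroy() when the socket is released: stop the timers and
 * purge the queues, then drop the MD5 keys, bind bucket and memcg
 * references held by the socket.
 */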
1920 void tcp_v4_destroy_sock(struct sock *sk)
1921 {
1922 	struct tcp_sock *tp = tcp_sk(sk);
1923 
1924 	tcp_clear_xmit_timers(sk);
1925 
1926 	tcp_cleanup_congestion_control(sk);
1927 
1928 	/* Clean up the write buffer. */
1929 	tcp_write_queue_purge(sk);
1930 
1931 	/* Cleans up our, hopefully empty, out_of_order_queue. */
1932 	__skb_queue_purge(&tp->out_of_order_queue);
1933 
1934 #ifdef CONFIG_TCP_MD5SIG
1935 	/* Clean up the MD5 key list, if any */
1936 	if (tp->md5sig_info) {
1937 		tcp_clear_md5_list(sk);
1938 		kfree_rcu(tp->md5sig_info, rcu);
1939 		tp->md5sig_info = NULL;
1940 	}
1941 #endif
1942 
1943 #ifdef CONFIG_NET_DMA
1944 	/* Cleans up our sk_async_wait_queue */
1945 	__skb_queue_purge(&sk->sk_async_wait_queue);
1946 #endif
1947 
1948 	/* Clean up the prequeue; it should already be empty. */
1949 	__skb_queue_purge(&tp->ucopy.prequeue);
1950 
1951 	/* Clean up a referenced TCP bind bucket. */
1952 	if (inet_csk(sk)->icsk_bind_hash)
1953 		inet_put_port(sk);
1954 
1955 	/*
1956 	 * If a cached sendmsg page exists, free it.
1957 	 */
1958 	if (sk->sk_sndmsg_page) {
1959 		__free_page(sk->sk_sndmsg_page);
1960 		sk->sk_sndmsg_page = NULL;
1961 	}
1962 
1963 	/* TCP Cookie Transactions */
1964 	if (tp->cookie_values != NULL) {
1965 		kref_put(&tp->cookie_values->kref,
1966 			 tcp_cookie_values_release);
1967 		tp->cookie_values = NULL;
1968 	}
1969 
1970 	/* If the socket was aborted during a connect operation, free any pending Fast Open request. */
1971 	tcp_free_fastopen_req(tp);
1972 
1973 	sk_sockets_allocated_dec(sk);
1974 	sock_release_memcg(sk);
1975 }
1976 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1977 
1978 #ifdef CONFIG_PROC_FS
1979 /* Proc filesystem TCP sock list dumping. */
1980 
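/*
 * Helpers for walking the (nulls-terminated) time-wait chain of an ehash
 * bucket from the iterators below.
 */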
1981 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1982 {
1983 	return hlist_nulls_empty(head) ? NULL :
1984 		list_entry(head->first, struct inet_timewait_sock, tw_node);
1985 }
1986 
1987 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1988 {
1989 	return !is_a_nulls(tw->tw_node.next) ?
1990 		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1991 }
1992 
1993 /*
1994  * Get the next listening socket following cur.  If cur is NULL, get the
1995  * first socket starting from the bucket given in st->bucket; when
1996  * st->bucket is zero, the very first socket in the hash table is returned.
1997  */
1998 static void *listening_get_next(struct seq_file *seq, void *cur)
1999 {
2000 	struct inet_connection_sock *icsk;
2001 	struct hlist_nulls_node *node;
2002 	struct sock *sk = cur;
2003 	struct inet_listen_hashbucket *ilb;
2004 	struct tcp_iter_state *st = seq->private;
2005 	struct net *net = seq_file_net(seq);
2006 
2007 	if (!sk) {
2008 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2009 		spin_lock_bh(&ilb->lock);
2010 		sk = sk_nulls_head(&ilb->head);
2011 		st->offset = 0;
2012 		goto get_sk;
2013 	}
2014 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2015 	++st->num;
2016 	++st->offset;
2017 
2018 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
2019 		struct request_sock *req = cur;
2020 
2021 		icsk = inet_csk(st->syn_wait_sk);
2022 		req = req->dl_next;
2023 		while (1) {
2024 			while (req) {
2025 				if (req->rsk_ops->family == st->family) {
2026 					cur = req;
2027 					goto out;
2028 				}
2029 				req = req->dl_next;
2030 			}
2031 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2032 				break;
2033 get_req:
2034 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2035 		}
2036 		sk	  = sk_nulls_next(st->syn_wait_sk);
2037 		st->state = TCP_SEQ_STATE_LISTENING;
2038 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2039 	} else {
2040 		icsk = inet_csk(sk);
2041 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2042 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2043 			goto start_req;
2044 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2045 		sk = sk_nulls_next(sk);
2046 	}
2047 get_sk:
2048 	sk_nulls_for_each_from(sk, node) {
2049 		if (!net_eq(sock_net(sk), net))
2050 			continue;
2051 		if (sk->sk_family == st->family) {
2052 			cur = sk;
2053 			goto out;
2054 		}
2055 		icsk = inet_csk(sk);
2056 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2057 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2058 start_req:
2059 			st->uid		= sock_i_uid(sk);
2060 			st->syn_wait_sk = sk;
2061 			st->state	= TCP_SEQ_STATE_OPENREQ;
2062 			st->sbucket	= 0;
2063 			goto get_req;
2064 		}
2065 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2066 	}
2067 	spin_unlock_bh(&ilb->lock);
2068 	st->offset = 0;
2069 	if (++st->bucket < INET_LHTABLE_SIZE) {
2070 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2071 		spin_lock_bh(&ilb->lock);
2072 		sk = sk_nulls_head(&ilb->head);
2073 		goto get_sk;
2074 	}
2075 	cur = NULL;
2076 out:
2077 	return cur;
2078 }
2079 
2080 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2081 {
2082 	struct tcp_iter_state *st = seq->private;
2083 	void *rc;
2084 
2085 	st->bucket = 0;
2086 	st->offset = 0;
2087 	rc = listening_get_next(seq, NULL);
2088 
2089 	while (rc && *pos) {
2090 		rc = listening_get_next(seq, rc);
2091 		--*pos;
2092 	}
2093 	return rc;
2094 }
2095 
2096 static inline bool empty_bucket(struct tcp_iter_state *st)
2097 {
2098 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2099 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2100 }
2101 
2102 /*
2103  * Get the first established socket, starting from the bucket given in st->bucket.
2104  * If st->bucket is zero, the very first socket in the hash is returned.
2105  */
2106 static void *established_get_first(struct seq_file *seq)
2107 {
2108 	struct tcp_iter_state *st = seq->private;
2109 	struct net *net = seq_file_net(seq);
2110 	void *rc = NULL;
2111 
2112 	st->offset = 0;
2113 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2114 		struct sock *sk;
2115 		struct hlist_nulls_node *node;
2116 		struct inet_timewait_sock *tw;
2117 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2118 
2119 		/* Lockless fast path for the common case of empty buckets */
2120 		if (empty_bucket(st))
2121 			continue;
2122 
2123 		spin_lock_bh(lock);
2124 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2125 			if (sk->sk_family != st->family ||
2126 			    !net_eq(sock_net(sk), net)) {
2127 				continue;
2128 			}
2129 			rc = sk;
2130 			goto out;
2131 		}
2132 		st->state = TCP_SEQ_STATE_TIME_WAIT;
2133 		inet_twsk_for_each(tw, node,
2134 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
2135 			if (tw->tw_family != st->family ||
2136 			    !net_eq(twsk_net(tw), net)) {
2137 				continue;
2138 			}
2139 			rc = tw;
2140 			goto out;
2141 		}
2142 		spin_unlock_bh(lock);
2143 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2144 	}
2145 out:
2146 	return rc;
2147 }
2148 
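/*
 * Advance the established-table iterator past cur: continue along the
 * current established chain, then the bucket's time-wait chain, and finally
 * move on to the next non-empty bucket.
 */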
2149 static void *established_get_next(struct seq_file *seq, void *cur)
2150 {
2151 	struct sock *sk = cur;
2152 	struct inet_timewait_sock *tw;
2153 	struct hlist_nulls_node *node;
2154 	struct tcp_iter_state *st = seq->private;
2155 	struct net *net = seq_file_net(seq);
2156 
2157 	++st->num;
2158 	++st->offset;
2159 
2160 	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2161 		tw = cur;
2162 		tw = tw_next(tw);
2163 get_tw:
2164 		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2165 			tw = tw_next(tw);
2166 		}
2167 		if (tw) {
2168 			cur = tw;
2169 			goto out;
2170 		}
2171 		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2172 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2173 
2174 		/* Look for the next non-empty bucket */
2175 		st->offset = 0;
2176 		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2177 				empty_bucket(st))
2178 			;
2179 		if (st->bucket > tcp_hashinfo.ehash_mask)
2180 			return NULL;
2181 
2182 		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2183 		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2184 	} else
2185 		sk = sk_nulls_next(sk);
2186 
2187 	sk_nulls_for_each_from(sk, node) {
2188 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2189 			goto found;
2190 	}
2191 
2192 	st->state = TCP_SEQ_STATE_TIME_WAIT;
2193 	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2194 	goto get_tw;
2195 found:
2196 	cur = sk;
2197 out:
2198 	return cur;
2199 }
2200 
2201 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2202 {
2203 	struct tcp_iter_state *st = seq->private;
2204 	void *rc;
2205 
2206 	st->bucket = 0;
2207 	rc = established_get_first(seq);
2208 
2209 	while (rc && pos) {
2210 		rc = established_get_next(seq, rc);
2211 		--pos;
2212 	}
2213 	return rc;
2214 }
2215 
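/*
 * Position the iterator at entry number pos: listening sockets are walked
 * first, followed by established and time-wait sockets.
 */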
2216 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2217 {
2218 	void *rc;
2219 	struct tcp_iter_state *st = seq->private;
2220 
2221 	st->state = TCP_SEQ_STATE_LISTENING;
2222 	rc	  = listening_get_idx(seq, &pos);
2223 
2224 	if (!rc) {
2225 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2226 		rc	  = established_get_idx(seq, pos);
2227 	}
2228 
2229 	return rc;
2230 }
2231 
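/*
 * Fast restart path for tcp_seq_start(): resume the walk at the bucket and
 * in-bucket offset remembered from the previous read instead of rescanning
 * the hash tables from the beginning.
 */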
2232 static void *tcp_seek_last_pos(struct seq_file *seq)
2233 {
2234 	struct tcp_iter_state *st = seq->private;
2235 	int offset = st->offset;
2236 	int orig_num = st->num;
2237 	void *rc = NULL;
2238 
2239 	switch (st->state) {
2240 	case TCP_SEQ_STATE_OPENREQ:
2241 	case TCP_SEQ_STATE_LISTENING:
2242 		if (st->bucket >= INET_LHTABLE_SIZE)
2243 			break;
2244 		st->state = TCP_SEQ_STATE_LISTENING;
2245 		rc = listening_get_next(seq, NULL);
2246 		while (offset-- && rc)
2247 			rc = listening_get_next(seq, rc);
2248 		if (rc)
2249 			break;
2250 		st->bucket = 0;
2251 		/* Fallthrough */
2252 	case TCP_SEQ_STATE_ESTABLISHED:
2253 	case TCP_SEQ_STATE_TIME_WAIT:
2254 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2255 		if (st->bucket > tcp_hashinfo.ehash_mask)
2256 			break;
2257 		rc = established_get_first(seq);
2258 		while (offset-- && rc)
2259 			rc = established_get_next(seq, rc);
2260 	}
2261 
2262 	st->num = orig_num;
2263 
2264 	return rc;
2265 }
2266 
2267 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2268 {
2269 	struct tcp_iter_state *st = seq->private;
2270 	void *rc;
2271 
2272 	if (*pos && *pos == st->last_pos) {
2273 		rc = tcp_seek_last_pos(seq);
2274 		if (rc)
2275 			goto out;
2276 	}
2277 
2278 	st->state = TCP_SEQ_STATE_LISTENING;
2279 	st->num = 0;
2280 	st->bucket = 0;
2281 	st->offset = 0;
2282 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2283 
2284 out:
2285 	st->last_pos = *pos;
2286 	return rc;
2287 }
2288 
2289 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2290 {
2291 	struct tcp_iter_state *st = seq->private;
2292 	void *rc = NULL;
2293 
2294 	if (v == SEQ_START_TOKEN) {
2295 		rc = tcp_get_idx(seq, 0);
2296 		goto out;
2297 	}
2298 
2299 	switch (st->state) {
2300 	case TCP_SEQ_STATE_OPENREQ:
2301 	case TCP_SEQ_STATE_LISTENING:
2302 		rc = listening_get_next(seq, v);
2303 		if (!rc) {
2304 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2305 			st->bucket = 0;
2306 			st->offset = 0;
2307 			rc	  = established_get_first(seq);
2308 		}
2309 		break;
2310 	case TCP_SEQ_STATE_ESTABLISHED:
2311 	case TCP_SEQ_STATE_TIME_WAIT:
2312 		rc = established_get_next(seq, v);
2313 		break;
2314 	}
2315 out:
2316 	++*pos;
2317 	st->last_pos = *pos;
2318 	return rc;
2319 }
2320 
2321 static void tcp_seq_stop(struct seq_file *seq, void *v)
2322 {
2323 	struct tcp_iter_state *st = seq->private;
2324 
2325 	switch (st->state) {
2326 	case TCP_SEQ_STATE_OPENREQ:
2327 		if (v) {
2328 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2329 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2330 		}
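		/* Fall through */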
2331 	case TCP_SEQ_STATE_LISTENING:
2332 		if (v != SEQ_START_TOKEN)
2333 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2334 		break;
2335 	case TCP_SEQ_STATE_TIME_WAIT:
2336 	case TCP_SEQ_STATE_ESTABLISHED:
2337 		if (v)
2338 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2339 		break;
2340 	}
2341 }
2342 
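/*
 * seq_file ->open handler shared by the IPv4 and IPv6 /proc instances; it
 * records the address family to filter on and resets the saved position.
 */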
2343 int tcp_seq_open(struct inode *inode, struct file *file)
2344 {
2345 	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2346 	struct tcp_iter_state *s;
2347 	int err;
2348 
2349 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2350 			  sizeof(struct tcp_iter_state));
2351 	if (err < 0)
2352 		return err;
2353 
2354 	s = ((struct seq_file *)file->private_data)->private;
2355 	s->family		= afinfo->family;
2356 	s->last_pos 		= 0;
2357 	return 0;
2358 }
2359 EXPORT_SYMBOL(tcp_seq_open);
2360 
2361 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2362 {
2363 	int rc = 0;
2364 	struct proc_dir_entry *p;
2365 
2366 	afinfo->seq_ops.start		= tcp_seq_start;
2367 	afinfo->seq_ops.next		= tcp_seq_next;
2368 	afinfo->seq_ops.stop		= tcp_seq_stop;
2369 
2370 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2371 			     afinfo->seq_fops, afinfo);
2372 	if (!p)
2373 		rc = -ENOMEM;
2374 	return rc;
2375 }
2376 EXPORT_SYMBOL(tcp_proc_register);
2377 
2378 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2379 {
2380 	proc_net_remove(net, afinfo->name);
2381 }
2382 EXPORT_SYMBOL(tcp_proc_unregister);
2383 
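/* Print one /proc/net/tcp line for a pending (SYN_RECV) connection request. */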
2384 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2385 			 struct seq_file *f, int i, int uid, int *len)
2386 {
2387 	const struct inet_request_sock *ireq = inet_rsk(req);
2388 	int ttd = req->expires - jiffies;
2389 
2390 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2391 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2392 		i,
2393 		ireq->loc_addr,
2394 		ntohs(inet_sk(sk)->inet_sport),
2395 		ireq->rmt_addr,
2396 		ntohs(ireq->rmt_port),
2397 		TCP_SYN_RECV,
2398 		0, 0, /* could print option size, but that is af dependent. */
2399 		1,    /* timers active (only the expire timer) */
2400 		jiffies_to_clock_t(ttd),
2401 		req->retrans,
2402 		uid,
2403 		0,  /* non-standard timer */
2404 		0, /* open_requests have no inode */
2405 		atomic_read(&sk->sk_refcnt),
2406 		req,
2407 		len);
2408 }
2409 
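/*
 * Print one /proc/net/tcp line for a full socket.  timer_active encodes the
 * pending timer: 1 retransmit, 2 keepalive (sk_timer), 4 zero-window probe,
 * 0 none.
 */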
2410 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2411 {
2412 	int timer_active;
2413 	unsigned long timer_expires;
2414 	const struct tcp_sock *tp = tcp_sk(sk);
2415 	const struct inet_connection_sock *icsk = inet_csk(sk);
2416 	const struct inet_sock *inet = inet_sk(sk);
2417 	__be32 dest = inet->inet_daddr;
2418 	__be32 src = inet->inet_rcv_saddr;
2419 	__u16 destp = ntohs(inet->inet_dport);
2420 	__u16 srcp = ntohs(inet->inet_sport);
2421 	int rx_queue;
2422 
2423 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2424 		timer_active	= 1;
2425 		timer_expires	= icsk->icsk_timeout;
2426 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2427 		timer_active	= 4;
2428 		timer_expires	= icsk->icsk_timeout;
2429 	} else if (timer_pending(&sk->sk_timer)) {
2430 		timer_active	= 2;
2431 		timer_expires	= sk->sk_timer.expires;
2432 	} else {
2433 		timer_active	= 0;
2434 		timer_expires = jiffies;
2435 	}
2436 
2437 	if (sk->sk_state == TCP_LISTEN)
2438 		rx_queue = sk->sk_ack_backlog;
2439 	else
2440 		/*
2441 		 * Because we don't lock the socket, we might find a transient negative value.
2442 		 */
2443 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2444 
2445 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2446 			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2447 		i, src, srcp, dest, destp, sk->sk_state,
2448 		tp->write_seq - tp->snd_una,
2449 		rx_queue,
2450 		timer_active,
2451 		jiffies_to_clock_t(timer_expires - jiffies),
2452 		icsk->icsk_retransmits,
2453 		sock_i_uid(sk),
2454 		icsk->icsk_probes_out,
2455 		sock_i_ino(sk),
2456 		atomic_read(&sk->sk_refcnt), sk,
2457 		jiffies_to_clock_t(icsk->icsk_rto),
2458 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2459 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2460 		tp->snd_cwnd,
2461 		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2462 		len);
2463 }
2464 
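/*
 * Print one /proc/net/tcp line for a TIME_WAIT socket; fields that only
 * exist for full sockets are reported as zero.
 */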
2465 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2466 			       struct seq_file *f, int i, int *len)
2467 {
2468 	__be32 dest, src;
2469 	__u16 destp, srcp;
2470 	int ttd = tw->tw_ttd - jiffies;
2471 
2472 	if (ttd < 0)
2473 		ttd = 0;
2474 
2475 	dest  = tw->tw_daddr;
2476 	src   = tw->tw_rcv_saddr;
2477 	destp = ntohs(tw->tw_dport);
2478 	srcp  = ntohs(tw->tw_sport);
2479 
2480 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2481 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2482 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2483 		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2484 		atomic_read(&tw->tw_refcnt), tw, len);
2485 }
2486 
2487 #define TMPSZ 150
2488 
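/*
 * seq_file ->show handler: print the header line for the start token,
 * otherwise dispatch on the iterator state and pad each entry to
 * TMPSZ - 1 characters.
 */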
2489 static int tcp4_seq_show(struct seq_file *seq, void *v)
2490 {
2491 	struct tcp_iter_state *st;
2492 	int len;
2493 
2494 	if (v == SEQ_START_TOKEN) {
2495 		seq_printf(seq, "%-*s\n", TMPSZ - 1,
2496 			   "  sl  local_address rem_address   st tx_queue "
2497 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2498 			   "inode");
2499 		goto out;
2500 	}
2501 	st = seq->private;
2502 
2503 	switch (st->state) {
2504 	case TCP_SEQ_STATE_LISTENING:
2505 	case TCP_SEQ_STATE_ESTABLISHED:
2506 		get_tcp4_sock(v, seq, st->num, &len);
2507 		break;
2508 	case TCP_SEQ_STATE_OPENREQ:
2509 		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2510 		break;
2511 	case TCP_SEQ_STATE_TIME_WAIT:
2512 		get_timewait4_sock(v, seq, st->num, &len);
2513 		break;
2514 	}
2515 	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2516 out:
2517 	return 0;
2518 }
2519 
2520 static const struct file_operations tcp_afinfo_seq_fops = {
2521 	.owner   = THIS_MODULE,
2522 	.open    = tcp_seq_open,
2523 	.read    = seq_read,
2524 	.llseek  = seq_lseek,
2525 	.release = seq_release_net
2526 };
2527 
2528 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2529 	.name		= "tcp",
2530 	.family		= AF_INET,
2531 	.seq_fops	= &tcp_afinfo_seq_fops,
2532 	.seq_ops	= {
2533 		.show		= tcp4_seq_show,
2534 	},
2535 };
2536 
2537 static int __net_init tcp4_proc_init_net(struct net *net)
2538 {
2539 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2540 }
2541 
2542 static void __net_exit tcp4_proc_exit_net(struct net *net)
2543 {
2544 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2545 }
2546 
2547 static struct pernet_operations tcp4_net_ops = {
2548 	.init = tcp4_proc_init_net,
2549 	.exit = tcp4_proc_exit_net,
2550 };
2551 
2552 int __init tcp4_proc_init(void)
2553 {
2554 	return register_pernet_subsys(&tcp4_net_ops);
2555 }
2556 
2557 void tcp4_proc_exit(void)
2558 {
2559 	unregister_pernet_subsys(&tcp4_net_ops);
2560 }
2561 #endif /* CONFIG_PROC_FS */
2562 
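/*
 * GRO receive hook: when the device supplied CHECKSUM_COMPLETE, verify the
 * pseudo-header checksum here; packets without a usable checksum are marked
 * for flushing instead of being merged.  tcp_gro_receive() does the rest.
 */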
2563 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2564 {
2565 	const struct iphdr *iph = skb_gro_network_header(skb);
2566 
2567 	switch (skb->ip_summed) {
2568 	case CHECKSUM_COMPLETE:
2569 		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2570 				  skb->csum)) {
2571 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2572 			break;
2573 		}
2574 
2575 		/* fall through */
2576 	case CHECKSUM_NONE:
2577 		NAPI_GRO_CB(skb)->flush = 1;
2578 		return NULL;
2579 	}
2580 
2581 	return tcp_gro_receive(head, skb);
2582 }
2583 
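/*
 * GRO complete hook: seed th->check with the (complemented) pseudo-header
 * checksum of the merged super-packet and mark it as TCPv4 GSO before it is
 * passed further up the stack.
 */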
2584 int tcp4_gro_complete(struct sk_buff *skb)
2585 {
2586 	const struct iphdr *iph = ip_hdr(skb);
2587 	struct tcphdr *th = tcp_hdr(skb);
2588 
2589 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2590 				  iph->saddr, iph->daddr, 0);
2591 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2592 
2593 	return tcp_gro_complete(skb);
2594 }
2595 
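/*
 * Protocol operations for AF_INET/SOCK_STREAM sockets, hooked into the
 * socket layer through the inetsw table in af_inet.c.
 */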
2596 struct proto tcp_prot = {
2597 	.name			= "TCP",
2598 	.owner			= THIS_MODULE,
2599 	.close			= tcp_close,
2600 	.connect		= tcp_v4_connect,
2601 	.disconnect		= tcp_disconnect,
2602 	.accept			= inet_csk_accept,
2603 	.ioctl			= tcp_ioctl,
2604 	.init			= tcp_v4_init_sock,
2605 	.destroy		= tcp_v4_destroy_sock,
2606 	.shutdown		= tcp_shutdown,
2607 	.setsockopt		= tcp_setsockopt,
2608 	.getsockopt		= tcp_getsockopt,
2609 	.recvmsg		= tcp_recvmsg,
2610 	.sendmsg		= tcp_sendmsg,
2611 	.sendpage		= tcp_sendpage,
2612 	.backlog_rcv		= tcp_v4_do_rcv,
2613 	.release_cb		= tcp_release_cb,
2614 	.mtu_reduced		= tcp_v4_mtu_reduced,
2615 	.hash			= inet_hash,
2616 	.unhash			= inet_unhash,
2617 	.get_port		= inet_csk_get_port,
2618 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2619 	.sockets_allocated	= &tcp_sockets_allocated,
2620 	.orphan_count		= &tcp_orphan_count,
2621 	.memory_allocated	= &tcp_memory_allocated,
2622 	.memory_pressure	= &tcp_memory_pressure,
2623 	.sysctl_wmem		= sysctl_tcp_wmem,
2624 	.sysctl_rmem		= sysctl_tcp_rmem,
2625 	.max_header		= MAX_TCP_HEADER,
2626 	.obj_size		= sizeof(struct tcp_sock),
2627 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2628 	.twsk_prot		= &tcp_timewait_sock_ops,
2629 	.rsk_prot		= &tcp_request_sock_ops,
2630 	.h.hashinfo		= &tcp_hashinfo,
2631 	.no_autobind		= true,
2632 #ifdef CONFIG_COMPAT
2633 	.compat_setsockopt	= compat_tcp_setsockopt,
2634 	.compat_getsockopt	= compat_tcp_getsockopt,
2635 #endif
2636 #ifdef CONFIG_MEMCG_KMEM
2637 	.init_cgroup		= tcp_init_cgroup,
2638 	.destroy_cgroup		= tcp_destroy_cgroup,
2639 	.proto_cgroup		= tcp_proto_cgroup,
2640 #endif
2641 };
2642 EXPORT_SYMBOL(tcp_prot);
2643 
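/*
 * Per-network-namespace init/exit hooks.  The batched exit purges any
 * remaining TIME_WAIT sockets belonging to the namespaces being destroyed.
 */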
2644 static int __net_init tcp_sk_init(struct net *net)
2645 {
2646 	return 0;
2647 }
2648 
2649 static void __net_exit tcp_sk_exit(struct net *net)
2650 {
2651 }
2652 
2653 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2654 {
2655 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2656 }
2657 
2658 static struct pernet_operations __net_initdata tcp_sk_ops = {
2659        .init	   = tcp_sk_init,
2660        .exit	   = tcp_sk_exit,
2661        .exit_batch = tcp_sk_exit_batch,
2662 };
2663 
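/*
 * Called early from inet_init() to initialize the TCP socket hash info and
 * register the per-namespace operations above.
 */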
2664 void __init tcp_v4_init(void)
2665 {
2666 	inet_hashinfo_init(&tcp_hashinfo);
2667 	if (register_pernet_subsys(&tcp_sk_ops))
2668 		panic("Failed to create the TCP control socket.\n");
2669 }
2670