xref: /linux/net/ipv4/tcp_ipv4.c (revision b9ccfda293ee6fca9a89a1584f0900e0627b975e)
1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45  *					coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 #endif
97 
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
100 
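/* Pick the initial sequence number for a connection from the addresses and
 * ports of the received segment, using the keyed hash in
 * secure_tcp_sequence_number().
 */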
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102 {
103 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 					  ip_hdr(skb)->saddr,
105 					  tcp_hdr(skb)->dest,
106 					  tcp_hdr(skb)->source);
107 }
108 
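/* Decide whether a connect() that collides with a TIME-WAIT socket may reuse
 * that four-tuple.  Returns 1 (after seeding write_seq and ts_recent from the
 * TIME-WAIT state) when reuse is considered safe, 0 otherwise.
 */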
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 	struct tcp_sock *tp = tcp_sk(sk);
113 
114 	/* With PAWS, it is safe from the viewpoint
115 	   of data integrity. Even without PAWS it is safe provided sequence
116 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117 
118 	   Actually, the idea is close to VJ's one, only the timestamp cache is
119 	   held not per host, but per port pair, and the TW bucket is used as the
120 	   state holder.
121 
122 	   If the TW bucket has already been destroyed we fall back to VJ's scheme
123 	   and use the initial timestamp retrieved from the peer table.
124 	 */
125 	if (tcptw->tw_ts_recent_stamp &&
126 	    (twp == NULL || (sysctl_tcp_tw_reuse &&
127 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 		if (tp->write_seq == 0)
130 			tp->write_seq = 1;
131 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
132 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 		sock_hold(sktw);
134 		return 1;
135 	}
136 
137 	return 0;
138 }
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 
141 static int tcp_repair_connect(struct sock *sk)
142 {
143 	tcp_connect_init(sk);
144 	tcp_finish_connect(sk, NULL);
145 
146 	return 0;
147 }
148 
149 /* This will initiate an outgoing connection. */
150 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
151 {
152 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 	struct inet_sock *inet = inet_sk(sk);
154 	struct tcp_sock *tp = tcp_sk(sk);
155 	__be16 orig_sport, orig_dport;
156 	__be32 daddr, nexthop;
157 	struct flowi4 *fl4;
158 	struct rtable *rt;
159 	int err;
160 	struct ip_options_rcu *inet_opt;
161 
162 	if (addr_len < sizeof(struct sockaddr_in))
163 		return -EINVAL;
164 
165 	if (usin->sin_family != AF_INET)
166 		return -EAFNOSUPPORT;
167 
168 	nexthop = daddr = usin->sin_addr.s_addr;
169 	inet_opt = rcu_dereference_protected(inet->inet_opt,
170 					     sock_owned_by_user(sk));
171 	if (inet_opt && inet_opt->opt.srr) {
172 		if (!daddr)
173 			return -EINVAL;
174 		nexthop = inet_opt->opt.faddr;
175 	}
176 
177 	orig_sport = inet->inet_sport;
178 	orig_dport = usin->sin_port;
179 	fl4 = &inet->cork.fl.u.ip4;
180 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
182 			      IPPROTO_TCP,
183 			      orig_sport, orig_dport, sk, true);
184 	if (IS_ERR(rt)) {
185 		err = PTR_ERR(rt);
186 		if (err == -ENETUNREACH)
187 			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
188 		return err;
189 	}
190 
191 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
192 		ip_rt_put(rt);
193 		return -ENETUNREACH;
194 	}
195 
196 	if (!inet_opt || !inet_opt->opt.srr)
197 		daddr = fl4->daddr;
198 
199 	if (!inet->inet_saddr)
200 		inet->inet_saddr = fl4->saddr;
201 	inet->inet_rcv_saddr = inet->inet_saddr;
202 
203 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 		/* Reset inherited state */
205 		tp->rx_opt.ts_recent	   = 0;
206 		tp->rx_opt.ts_recent_stamp = 0;
207 		if (likely(!tp->repair))
208 			tp->write_seq	   = 0;
209 	}
210 
211 	if (tcp_death_row.sysctl_tw_recycle &&
212 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 		tcp_fetch_timewait_stamp(sk, &rt->dst);
214 
215 	inet->inet_dport = usin->sin_port;
216 	inet->inet_daddr = daddr;
217 
218 	inet_csk(sk)->icsk_ext_hdr_len = 0;
219 	if (inet_opt)
220 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
221 
222 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
223 
224 	/* Socket identity is still unknown (sport may be zero).
225 	 * However we set the state to SYN-SENT and, without releasing the
226 	 * socket lock, select a source port, enter ourselves into the hash
227 	 * tables and complete initialization after this.
228 	 */
229 	tcp_set_state(sk, TCP_SYN_SENT);
230 	err = inet_hash_connect(&tcp_death_row, sk);
231 	if (err)
232 		goto failure;
233 
234 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
235 			       inet->inet_sport, inet->inet_dport, sk);
236 	if (IS_ERR(rt)) {
237 		err = PTR_ERR(rt);
238 		rt = NULL;
239 		goto failure;
240 	}
241 	/* OK, now commit destination to socket.  */
242 	sk->sk_gso_type = SKB_GSO_TCPV4;
243 	sk_setup_caps(sk, &rt->dst);
244 
245 	if (!tp->write_seq && likely(!tp->repair))
246 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
247 							   inet->inet_daddr,
248 							   inet->inet_sport,
249 							   usin->sin_port);
250 
251 	inet->inet_id = tp->write_seq ^ jiffies;
252 
253 	if (likely(!tp->repair))
254 		err = tcp_connect(sk);
255 	else
256 		err = tcp_repair_connect(sk);
257 
258 	rt = NULL;
259 	if (err)
260 		goto failure;
261 
262 	return 0;
263 
264 failure:
265 	/*
266 	 * This unhashes the socket and releases the local port,
267 	 * if necessary.
268 	 */
269 	tcp_set_state(sk, TCP_CLOSE);
270 	ip_rt_put(rt);
271 	sk->sk_route_caps = 0;
272 	inet->inet_dport = 0;
273 	return err;
274 }
275 EXPORT_SYMBOL(tcp_v4_connect);
276 
277 /*
278  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
279  * It can be called through tcp_release_cb() if the socket was owned by the
280  * user at the time tcp_v4_err() was called to handle the ICMP message.
281  */
282 static void tcp_v4_mtu_reduced(struct sock *sk)
283 {
284 	struct dst_entry *dst;
285 	struct inet_sock *inet = inet_sk(sk);
286 	u32 mtu = tcp_sk(sk)->mtu_info;
287 
288 	/* We are not interested in TCP_LISTEN and open requests (SYN-ACKs
289 	 * sent out by Linux are always < 576 bytes, so they should go through
290 	 * unfragmented).
291 	 */
292 	if (sk->sk_state == TCP_LISTEN)
293 		return;
294 
295 	dst = inet_csk_update_pmtu(sk, mtu);
296 	if (!dst)
297 		return;
298 
299 	/* Something is about to go wrong... Remember the soft error
300 	 * in case this connection is not able to recover.
301 	 */
302 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
303 		sk->sk_err_soft = EMSGSIZE;
304 
305 	mtu = dst_mtu(dst);
306 
307 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
308 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
309 		tcp_sync_mss(sk, mtu);
310 
311 		/* Resend the TCP packet because it's
312 		 * clear that the old packet has been
313 		 * dropped. This is the new "fast" path mtu
314 		 * discovery.
315 		 */
316 		tcp_simple_retransmit(sk);
317 	} /* else let the usual retransmit timer handle it */
318 }
319 
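/* Handle an ICMP redirect for this socket by asking the cached route
 * (if any) to update its next hop.
 */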
320 static void do_redirect(struct sk_buff *skb, struct sock *sk)
321 {
322 	struct dst_entry *dst = __sk_dst_check(sk, 0);
323 
324 	if (dst)
325 		dst->ops->redirect(dst, sk, skb);
326 }
327 
328 /*
329  * This routine is called by the ICMP module when it gets some
330  * sort of error condition.  If err < 0 then the socket should
331  * be closed and the error returned to the user.  If err > 0
332  * it's just the icmp type << 8 | icmp code.  After adjustment,
333  * the header points to the first 8 bytes of the TCP header.  We need
334  * to find the appropriate port.
335  *
336  * The locking strategy used here is very "optimistic". When
337  * someone else accesses the socket the ICMP is just dropped
338  * and for some paths there is no check at all.
339  * A more general error queue to queue errors for later handling
340  * is probably better.
341  *
342  */
343 
344 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
345 {
346 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
347 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
348 	struct inet_connection_sock *icsk;
349 	struct tcp_sock *tp;
350 	struct inet_sock *inet;
351 	const int type = icmp_hdr(icmp_skb)->type;
352 	const int code = icmp_hdr(icmp_skb)->code;
353 	struct sock *sk;
354 	struct sk_buff *skb;
355 	__u32 seq;
356 	__u32 remaining;
357 	int err;
358 	struct net *net = dev_net(icmp_skb->dev);
359 
360 	if (icmp_skb->len < (iph->ihl << 2) + 8) {
361 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
362 		return;
363 	}
364 
365 	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
366 			iph->saddr, th->source, inet_iif(icmp_skb));
367 	if (!sk) {
368 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
369 		return;
370 	}
371 	if (sk->sk_state == TCP_TIME_WAIT) {
372 		inet_twsk_put(inet_twsk(sk));
373 		return;
374 	}
375 
376 	bh_lock_sock(sk);
377 	/* If too many ICMPs get dropped on busy
378 	 * servers this needs to be solved differently.
379 	 * We do take care of PMTU discovery (RFC1191) special case :
380 	 * we can receive locally generated ICMP messages while socket is held.
381 	 */
382 	if (sock_owned_by_user(sk) &&
383 	    type != ICMP_DEST_UNREACH &&
384 	    code != ICMP_FRAG_NEEDED)
385 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
386 
387 	if (sk->sk_state == TCP_CLOSE)
388 		goto out;
389 
390 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
391 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
392 		goto out;
393 	}
394 
395 	icsk = inet_csk(sk);
396 	tp = tcp_sk(sk);
397 	seq = ntohl(th->seq);
398 	if (sk->sk_state != TCP_LISTEN &&
399 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
400 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
401 		goto out;
402 	}
403 
404 	switch (type) {
405 	case ICMP_REDIRECT:
406 		do_redirect(icmp_skb, sk);
407 		goto out;
408 	case ICMP_SOURCE_QUENCH:
409 		/* Just silently ignore these. */
410 		goto out;
411 	case ICMP_PARAMETERPROB:
412 		err = EPROTO;
413 		break;
414 	case ICMP_DEST_UNREACH:
415 		if (code > NR_ICMP_UNREACH)
416 			goto out;
417 
418 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
419 			tp->mtu_info = info;
420 			if (!sock_owned_by_user(sk))
421 				tcp_v4_mtu_reduced(sk);
422 			else
423 				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
424 			goto out;
425 		}
426 
427 		err = icmp_err_convert[code].errno;
428 		/* check if icmp_skb allows revert of backoff
429 		 * (see draft-zimmermann-tcp-lcd) */
430 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
431 			break;
432 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
433 		    !icsk->icsk_backoff)
434 			break;
435 
436 		if (sock_owned_by_user(sk))
437 			break;
438 
439 		icsk->icsk_backoff--;
440 		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
441 			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
442 		tcp_bound_rto(sk);
443 
444 		skb = tcp_write_queue_head(sk);
445 		BUG_ON(!skb);
446 
447 		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
448 				tcp_time_stamp - TCP_SKB_CB(skb)->when);
449 
450 		if (remaining) {
451 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
452 						  remaining, TCP_RTO_MAX);
453 		} else {
454 			/* RTO revert clocked out retransmission.
455 			 * Will retransmit now */
456 			tcp_retransmit_timer(sk);
457 		}
458 
459 		break;
460 	case ICMP_TIME_EXCEEDED:
461 		err = EHOSTUNREACH;
462 		break;
463 	default:
464 		goto out;
465 	}
466 
467 	switch (sk->sk_state) {
468 		struct request_sock *req, **prev;
469 	case TCP_LISTEN:
470 		if (sock_owned_by_user(sk))
471 			goto out;
472 
473 		req = inet_csk_search_req(sk, &prev, th->dest,
474 					  iph->daddr, iph->saddr);
475 		if (!req)
476 			goto out;
477 
478 		/* ICMPs are not backlogged, hence we cannot get
479 		   an established socket here.
480 		 */
481 		WARN_ON(req->sk);
482 
483 		if (seq != tcp_rsk(req)->snt_isn) {
484 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
485 			goto out;
486 		}
487 
488 		/*
489 		 * Still in SYN_RECV, just remove it silently.
490 		 * There is no good way to pass the error to the newly
491 		 * created socket, and POSIX does not want network
492 		 * errors returned from accept().
493 		 */
494 		inet_csk_reqsk_queue_drop(sk, req, prev);
495 		goto out;
496 
497 	case TCP_SYN_SENT:
498 	case TCP_SYN_RECV:  /* Normally cannot happen.
499 			       It can, for example, if SYNs crossed.
500 			     */
501 		if (!sock_owned_by_user(sk)) {
502 			sk->sk_err = err;
503 
504 			sk->sk_error_report(sk);
505 
506 			tcp_done(sk);
507 		} else {
508 			sk->sk_err_soft = err;
509 		}
510 		goto out;
511 	}
512 
513 	/* If we've already connected we will keep trying
514 	 * until we time out, or the user gives up.
515 	 *
516 	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
517 	 * considered hard errors (well, FRAG_FAILED too, but it is obsoleted
518 	 * by PMTU discovery).
519 	 *
520 	 * Note that in the modern internet, where routing is unreliable and
521 	 * broken firewalls sit in every dark corner sending random errors as
522 	 * ordered by their masters, even these two messages have finally lost
523 	 * their original sense (even Linux sends invalid PORT_UNREACHs).
524 	 *
525 	 * Now we are in compliance with RFCs.
526 	 *							--ANK (980905)
527 	 */
528 
529 	inet = inet_sk(sk);
530 	if (!sock_owned_by_user(sk) && inet->recverr) {
531 		sk->sk_err = err;
532 		sk->sk_error_report(sk);
533 	} else	{ /* Only an error on timeout */
534 		sk->sk_err_soft = err;
535 	}
536 
537 out:
538 	bh_unlock_sock(sk);
539 	sock_put(sk);
540 }
541 
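/* Fill in the TCP checksum for an outgoing segment: either set up a partial
 * checksum for hardware offload (CHECKSUM_PARTIAL) or compute the full
 * checksum in software over the pseudo-header and payload.
 */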
542 static void __tcp_v4_send_check(struct sk_buff *skb,
543 				__be32 saddr, __be32 daddr)
544 {
545 	struct tcphdr *th = tcp_hdr(skb);
546 
547 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
548 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549 		skb->csum_start = skb_transport_header(skb) - skb->head;
550 		skb->csum_offset = offsetof(struct tcphdr, check);
551 	} else {
552 		th->check = tcp_v4_check(skb->len, saddr, daddr,
553 					 csum_partial(th,
554 						      th->doff << 2,
555 						      skb->csum));
556 	}
557 }
558 
559 /* This routine computes an IPv4 TCP checksum. */
560 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
561 {
562 	const struct inet_sock *inet = inet_sk(sk);
563 
564 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565 }
566 EXPORT_SYMBOL(tcp_v4_send_check);
567 
568 int tcp_v4_gso_send_check(struct sk_buff *skb)
569 {
570 	const struct iphdr *iph;
571 	struct tcphdr *th;
572 
573 	if (!pskb_may_pull(skb, sizeof(*th)))
574 		return -EINVAL;
575 
576 	iph = ip_hdr(skb);
577 	th = tcp_hdr(skb);
578 
579 	th->check = 0;
580 	skb->ip_summed = CHECKSUM_PARTIAL;
581 	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
582 	return 0;
583 }
584 
585 /*
586  *	This routine will send an RST to the other tcp.
587  *
588  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
589  *		      for the reset?
590  *	Answer: if a packet caused the RST, it is not for a socket
591  *		existing in our system; if it is matched to a socket,
592  *		it is just a duplicate segment or a bug in the other side's
593  *		TCP.  So we build the reply based only on the parameters
594  *		that arrived with the segment.
595  *	Exception: precedence violation. We do not implement it in any case.
596  */
597 
598 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
599 {
600 	const struct tcphdr *th = tcp_hdr(skb);
601 	struct {
602 		struct tcphdr th;
603 #ifdef CONFIG_TCP_MD5SIG
604 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
605 #endif
606 	} rep;
607 	struct ip_reply_arg arg;
608 #ifdef CONFIG_TCP_MD5SIG
609 	struct tcp_md5sig_key *key;
610 	const __u8 *hash_location = NULL;
611 	unsigned char newhash[16];
612 	int genhash;
613 	struct sock *sk1 = NULL;
614 #endif
615 	struct net *net;
616 
617 	/* Never send a reset in response to a reset. */
618 	if (th->rst)
619 		return;
620 
621 	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
622 		return;
623 
624 	/* Swap the send and the receive. */
625 	memset(&rep, 0, sizeof(rep));
626 	rep.th.dest   = th->source;
627 	rep.th.source = th->dest;
628 	rep.th.doff   = sizeof(struct tcphdr) / 4;
629 	rep.th.rst    = 1;
630 
631 	if (th->ack) {
632 		rep.th.seq = th->ack_seq;
633 	} else {
634 		rep.th.ack = 1;
635 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
636 				       skb->len - (th->doff << 2));
637 	}
638 
639 	memset(&arg, 0, sizeof(arg));
640 	arg.iov[0].iov_base = (unsigned char *)&rep;
641 	arg.iov[0].iov_len  = sizeof(rep.th);
642 
643 #ifdef CONFIG_TCP_MD5SIG
644 	hash_location = tcp_parse_md5sig_option(th);
645 	if (!sk && hash_location) {
646 		/*
647 		 * The active side is gone. Try to find the listening socket
648 		 * through the source port, and then find the md5 key through
649 		 * that listening socket. We do not lose security here:
650 		 * the incoming packet is checked against the md5 hash of the
651 		 * key we find, and no RST is generated if the hash doesn't match.
652 		 */
653 		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
654 					     &tcp_hashinfo, ip_hdr(skb)->daddr,
655 					     ntohs(th->source), inet_iif(skb));
656 		/* don't send an RST if we can't find the key */
657 		if (!sk1)
658 			return;
659 		rcu_read_lock();
660 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 					&ip_hdr(skb)->saddr, AF_INET);
662 		if (!key)
663 			goto release_sk1;
664 
665 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
666 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
667 			goto release_sk1;
668 	} else {
669 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
670 					     &ip_hdr(skb)->saddr,
671 					     AF_INET) : NULL;
672 	}
673 
674 	if (key) {
675 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
676 				   (TCPOPT_NOP << 16) |
677 				   (TCPOPT_MD5SIG << 8) |
678 				   TCPOLEN_MD5SIG);
679 		/* Update length and the length the header thinks exists */
680 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
681 		rep.th.doff = arg.iov[0].iov_len / 4;
682 
683 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
684 				     key, ip_hdr(skb)->saddr,
685 				     ip_hdr(skb)->daddr, &rep.th);
686 	}
687 #endif
688 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
689 				      ip_hdr(skb)->saddr, /* XXX */
690 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
691 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
692 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
693 	/* When the socket is gone, all binding information is lost and
694 	 * routing might fail in this case. Use the iif for the oif to
695 	 * make sure we can deliver the reply.
696 	 */
697 	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
698 
699 	net = dev_net(skb_dst(skb)->dev);
700 	arg.tos = ip_hdr(skb)->tos;
701 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
702 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
703 
704 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
706 
707 #ifdef CONFIG_TCP_MD5SIG
708 release_sk1:
709 	if (sk1) {
710 		rcu_read_unlock();
711 		sock_put(sk1);
712 	}
713 #endif
714 }
715 
716 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
717    outside socket context, is ugly, certainly. What can I do?
718  */
719 
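/* Build and send a bare ACK carrying the given sequence/ack numbers and
 * window, plus TCP timestamp and MD5 options when requested.  Used for the
 * TIME-WAIT and request_sock replies below.
 */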
720 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
721 			    u32 win, u32 ts, int oif,
722 			    struct tcp_md5sig_key *key,
723 			    int reply_flags, u8 tos)
724 {
725 	const struct tcphdr *th = tcp_hdr(skb);
726 	struct {
727 		struct tcphdr th;
728 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
729 #ifdef CONFIG_TCP_MD5SIG
730 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
731 #endif
732 			];
733 	} rep;
734 	struct ip_reply_arg arg;
735 	struct net *net = dev_net(skb_dst(skb)->dev);
736 
737 	memset(&rep.th, 0, sizeof(struct tcphdr));
738 	memset(&arg, 0, sizeof(arg));
739 
740 	arg.iov[0].iov_base = (unsigned char *)&rep;
741 	arg.iov[0].iov_len  = sizeof(rep.th);
742 	if (ts) {
743 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
744 				   (TCPOPT_TIMESTAMP << 8) |
745 				   TCPOLEN_TIMESTAMP);
746 		rep.opt[1] = htonl(tcp_time_stamp);
747 		rep.opt[2] = htonl(ts);
748 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
749 	}
750 
751 	/* Swap the send and the receive. */
752 	rep.th.dest    = th->source;
753 	rep.th.source  = th->dest;
754 	rep.th.doff    = arg.iov[0].iov_len / 4;
755 	rep.th.seq     = htonl(seq);
756 	rep.th.ack_seq = htonl(ack);
757 	rep.th.ack     = 1;
758 	rep.th.window  = htons(win);
759 
760 #ifdef CONFIG_TCP_MD5SIG
761 	if (key) {
762 		int offset = (ts) ? 3 : 0;
763 
764 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
765 					  (TCPOPT_NOP << 16) |
766 					  (TCPOPT_MD5SIG << 8) |
767 					  TCPOLEN_MD5SIG);
768 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
769 		rep.th.doff = arg.iov[0].iov_len/4;
770 
771 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
772 				    key, ip_hdr(skb)->saddr,
773 				    ip_hdr(skb)->daddr, &rep.th);
774 	}
775 #endif
776 	arg.flags = reply_flags;
777 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
778 				      ip_hdr(skb)->saddr, /* XXX */
779 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
780 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
781 	if (oif)
782 		arg.bound_dev_if = oif;
783 	arg.tos = tos;
784 	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
785 			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
786 
787 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
788 }
789 
790 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
791 {
792 	struct inet_timewait_sock *tw = inet_twsk(sk);
793 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
794 
795 	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
796 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
797 			tcptw->tw_ts_recent,
798 			tw->tw_bound_dev_if,
799 			tcp_twsk_md5_key(tcptw),
800 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
801 			tw->tw_tos
802 			);
803 
804 	inet_twsk_put(tw);
805 }
806 
807 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
808 				  struct request_sock *req)
809 {
810 	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
811 			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
812 			req->ts_recent,
813 			0,
814 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
815 					  AF_INET),
816 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 			ip_hdr(skb)->tos);
818 }
819 
820 /*
821  *	Send a SYN-ACK after having received a SYN.
822  *	This still operates on a request_sock only, not on a big
823  *	socket.
824  */
825 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 			      struct request_sock *req,
827 			      struct request_values *rvp,
828 			      u16 queue_mapping,
829 			      bool nocache)
830 {
831 	const struct inet_request_sock *ireq = inet_rsk(req);
832 	struct flowi4 fl4;
833 	int err = -1;
834 	struct sk_buff * skb;
835 
836 	/* First, grab a route. */
837 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
838 		return -1;
839 
840 	skb = tcp_make_synack(sk, dst, req, rvp);
841 
842 	if (skb) {
843 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
844 
845 		skb_set_queue_mapping(skb, queue_mapping);
846 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
847 					    ireq->rmt_addr,
848 					    ireq->opt);
849 		err = net_xmit_eval(err);
850 	}
851 
852 	return err;
853 }
854 
855 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
856 			      struct request_values *rvp)
857 {
858 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
860 }
861 
862 /*
863  *	IPv4 request_sock destructor.
864  */
865 static void tcp_v4_reqsk_destructor(struct request_sock *req)
866 {
867 	kfree(inet_rsk(req)->opt);
868 }
869 
870 /*
871  * Return true if a syncookie should be sent
872  */
873 bool tcp_syn_flood_action(struct sock *sk,
874 			 const struct sk_buff *skb,
875 			 const char *proto)
876 {
877 	const char *msg = "Dropping request";
878 	bool want_cookie = false;
879 	struct listen_sock *lopt;
880 
881 
882 
883 #ifdef CONFIG_SYN_COOKIES
884 	if (sysctl_tcp_syncookies) {
885 		msg = "Sending cookies";
886 		want_cookie = true;
887 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
888 	} else
889 #endif
890 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
891 
892 	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
893 	if (!lopt->synflood_warned) {
894 		lopt->synflood_warned = 1;
895 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
896 			proto, ntohs(tcp_hdr(skb)->dest), msg);
897 	}
898 	return want_cookie;
899 }
900 EXPORT_SYMBOL(tcp_syn_flood_action);
901 
902 /*
903  * Save and compile IPv4 options into the request_sock if needed.
904  */
905 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
906 						  struct sk_buff *skb)
907 {
908 	const struct ip_options *opt = &(IPCB(skb)->opt);
909 	struct ip_options_rcu *dopt = NULL;
910 
911 	if (opt && opt->optlen) {
912 		int opt_size = sizeof(*dopt) + opt->optlen;
913 
914 		dopt = kmalloc(opt_size, GFP_ATOMIC);
915 		if (dopt) {
916 			if (ip_options_echo(&dopt->opt, skb)) {
917 				kfree(dopt);
918 				dopt = NULL;
919 			}
920 		}
921 	}
922 	return dopt;
923 }
924 
925 #ifdef CONFIG_TCP_MD5SIG
926 /*
927  * RFC2385 MD5 checksumming requires a mapping of
928  * IP address->MD5 Key.
929  * We need to maintain these in the sk structure.
930  */
931 
932 /* Find the Key structure for an address.  */
933 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
934 					 const union tcp_md5_addr *addr,
935 					 int family)
936 {
937 	struct tcp_sock *tp = tcp_sk(sk);
938 	struct tcp_md5sig_key *key;
939 	struct hlist_node *pos;
940 	unsigned int size = sizeof(struct in_addr);
941 	struct tcp_md5sig_info *md5sig;
942 
943 	/* caller either holds rcu_read_lock() or socket lock */
944 	md5sig = rcu_dereference_check(tp->md5sig_info,
945 				       sock_owned_by_user(sk) ||
946 				       lockdep_is_held(&sk->sk_lock.slock));
947 	if (!md5sig)
948 		return NULL;
949 #if IS_ENABLED(CONFIG_IPV6)
950 	if (family == AF_INET6)
951 		size = sizeof(struct in6_addr);
952 #endif
953 	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
954 		if (key->family != family)
955 			continue;
956 		if (!memcmp(&key->addr, addr, size))
957 			return key;
958 	}
959 	return NULL;
960 }
961 EXPORT_SYMBOL(tcp_md5_do_lookup);
962 
963 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
964 					 struct sock *addr_sk)
965 {
966 	union tcp_md5_addr *addr;
967 
968 	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
969 	return tcp_md5_do_lookup(sk, addr, AF_INET);
970 }
971 EXPORT_SYMBOL(tcp_v4_md5_lookup);
972 
973 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
974 						      struct request_sock *req)
975 {
976 	union tcp_md5_addr *addr;
977 
978 	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
979 	return tcp_md5_do_lookup(sk, addr, AF_INET);
980 }
981 
982 /* This can be called on a newly created socket, from other files */
983 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
984 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
985 {
986 	/* Add Key to the list */
987 	struct tcp_md5sig_key *key;
988 	struct tcp_sock *tp = tcp_sk(sk);
989 	struct tcp_md5sig_info *md5sig;
990 
991 	key = tcp_md5_do_lookup(sk, addr, family);
992 	if (key) {
993 		/* Pre-existing entry - just update that one. */
994 		memcpy(key->key, newkey, newkeylen);
995 		key->keylen = newkeylen;
996 		return 0;
997 	}
998 
999 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1000 					   sock_owned_by_user(sk));
1001 	if (!md5sig) {
1002 		md5sig = kmalloc(sizeof(*md5sig), gfp);
1003 		if (!md5sig)
1004 			return -ENOMEM;
1005 
1006 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1007 		INIT_HLIST_HEAD(&md5sig->head);
1008 		rcu_assign_pointer(tp->md5sig_info, md5sig);
1009 	}
1010 
1011 	key = sock_kmalloc(sk, sizeof(*key), gfp);
1012 	if (!key)
1013 		return -ENOMEM;
1014 	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1015 		sock_kfree_s(sk, key, sizeof(*key));
1016 		return -ENOMEM;
1017 	}
1018 
1019 	memcpy(key->key, newkey, newkeylen);
1020 	key->keylen = newkeylen;
1021 	key->family = family;
1022 	memcpy(&key->addr, addr,
1023 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
1024 				      sizeof(struct in_addr));
1025 	hlist_add_head_rcu(&key->node, &md5sig->head);
1026 	return 0;
1027 }
1028 EXPORT_SYMBOL(tcp_md5_do_add);
1029 
1030 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1031 {
1032 	struct tcp_sock *tp = tcp_sk(sk);
1033 	struct tcp_md5sig_key *key;
1034 	struct tcp_md5sig_info *md5sig;
1035 
1036 	key = tcp_md5_do_lookup(sk, addr, family);
1037 	if (!key)
1038 		return -ENOENT;
1039 	hlist_del_rcu(&key->node);
1040 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1041 	kfree_rcu(key, rcu);
1042 	md5sig = rcu_dereference_protected(tp->md5sig_info,
1043 					   sock_owned_by_user(sk));
1044 	if (hlist_empty(&md5sig->head))
1045 		tcp_free_md5sig_pool();
1046 	return 0;
1047 }
1048 EXPORT_SYMBOL(tcp_md5_do_del);
1049 
1050 void tcp_clear_md5_list(struct sock *sk)
1051 {
1052 	struct tcp_sock *tp = tcp_sk(sk);
1053 	struct tcp_md5sig_key *key;
1054 	struct hlist_node *pos, *n;
1055 	struct tcp_md5sig_info *md5sig;
1056 
1057 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1058 
1059 	if (!hlist_empty(&md5sig->head))
1060 		tcp_free_md5sig_pool();
1061 	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1062 		hlist_del_rcu(&key->node);
1063 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1064 		kfree_rcu(key, rcu);
1065 	}
1066 }
1067 
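/* setsockopt(TCP_MD5SIG) handler: copy the request from user space and add
 * or delete the MD5 key for the given IPv4 peer address.
 */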
1068 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1069 				 int optlen)
1070 {
1071 	struct tcp_md5sig cmd;
1072 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1073 
1074 	if (optlen < sizeof(cmd))
1075 		return -EINVAL;
1076 
1077 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1078 		return -EFAULT;
1079 
1080 	if (sin->sin_family != AF_INET)
1081 		return -EINVAL;
1082 
1083 	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1084 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1085 				      AF_INET);
1086 
1087 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1088 		return -EINVAL;
1089 
1090 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1091 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1092 			      GFP_KERNEL);
1093 }
1094 
1095 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1096 					__be32 daddr, __be32 saddr, int nbytes)
1097 {
1098 	struct tcp4_pseudohdr *bp;
1099 	struct scatterlist sg;
1100 
1101 	bp = &hp->md5_blk.ip4;
1102 
1103 	/*
1104 	 * 1. the TCP pseudo-header (in the order: source IP address,
1105 	 * destination IP address, zero-padded protocol number, and
1106 	 * segment length)
1107 	 */
1108 	bp->saddr = saddr;
1109 	bp->daddr = daddr;
1110 	bp->pad = 0;
1111 	bp->protocol = IPPROTO_TCP;
1112 	bp->len = cpu_to_be16(nbytes);
1113 
1114 	sg_init_one(&sg, bp, sizeof(*bp));
1115 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1116 }
1117 
1118 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1119 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1120 {
1121 	struct tcp_md5sig_pool *hp;
1122 	struct hash_desc *desc;
1123 
1124 	hp = tcp_get_md5sig_pool();
1125 	if (!hp)
1126 		goto clear_hash_noput;
1127 	desc = &hp->md5_desc;
1128 
1129 	if (crypto_hash_init(desc))
1130 		goto clear_hash;
1131 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1132 		goto clear_hash;
1133 	if (tcp_md5_hash_header(hp, th))
1134 		goto clear_hash;
1135 	if (tcp_md5_hash_key(hp, key))
1136 		goto clear_hash;
1137 	if (crypto_hash_final(desc, md5_hash))
1138 		goto clear_hash;
1139 
1140 	tcp_put_md5sig_pool();
1141 	return 0;
1142 
1143 clear_hash:
1144 	tcp_put_md5sig_pool();
1145 clear_hash_noput:
1146 	memset(md5_hash, 0, 16);
1147 	return 1;
1148 }
1149 
1150 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1151 			const struct sock *sk, const struct request_sock *req,
1152 			const struct sk_buff *skb)
1153 {
1154 	struct tcp_md5sig_pool *hp;
1155 	struct hash_desc *desc;
1156 	const struct tcphdr *th = tcp_hdr(skb);
1157 	__be32 saddr, daddr;
1158 
1159 	if (sk) {
1160 		saddr = inet_sk(sk)->inet_saddr;
1161 		daddr = inet_sk(sk)->inet_daddr;
1162 	} else if (req) {
1163 		saddr = inet_rsk(req)->loc_addr;
1164 		daddr = inet_rsk(req)->rmt_addr;
1165 	} else {
1166 		const struct iphdr *iph = ip_hdr(skb);
1167 		saddr = iph->saddr;
1168 		daddr = iph->daddr;
1169 	}
1170 
1171 	hp = tcp_get_md5sig_pool();
1172 	if (!hp)
1173 		goto clear_hash_noput;
1174 	desc = &hp->md5_desc;
1175 
1176 	if (crypto_hash_init(desc))
1177 		goto clear_hash;
1178 
1179 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1180 		goto clear_hash;
1181 	if (tcp_md5_hash_header(hp, th))
1182 		goto clear_hash;
1183 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 		goto clear_hash;
1185 	if (tcp_md5_hash_key(hp, key))
1186 		goto clear_hash;
1187 	if (crypto_hash_final(desc, md5_hash))
1188 		goto clear_hash;
1189 
1190 	tcp_put_md5sig_pool();
1191 	return 0;
1192 
1193 clear_hash:
1194 	tcp_put_md5sig_pool();
1195 clear_hash_noput:
1196 	memset(md5_hash, 0, 16);
1197 	return 1;
1198 }
1199 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1200 
1201 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1202 {
1203 	/*
1204 	 * This gets called for each TCP segment that arrives
1205 	 * so we want to be efficient.
1206 	 * We have 3 drop cases:
1207 	 * o No MD5 hash and one expected.
1208 	 * o MD5 hash and we're not expecting one.
1209 	 * o MD5 hash and it's wrong.
1210 	 */
1211 	const __u8 *hash_location = NULL;
1212 	struct tcp_md5sig_key *hash_expected;
1213 	const struct iphdr *iph = ip_hdr(skb);
1214 	const struct tcphdr *th = tcp_hdr(skb);
1215 	int genhash;
1216 	unsigned char newhash[16];
1217 
1218 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1219 					  AF_INET);
1220 	hash_location = tcp_parse_md5sig_option(th);
1221 
1222 	/* We've parsed the options - do we have a hash? */
1223 	if (!hash_expected && !hash_location)
1224 		return false;
1225 
1226 	if (hash_expected && !hash_location) {
1227 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1228 		return true;
1229 	}
1230 
1231 	if (!hash_expected && hash_location) {
1232 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1233 		return true;
1234 	}
1235 
1236 	/* Okay, so this is hash_expected and hash_location -
1237 	 * so we need to calculate the checksum.
1238 	 */
1239 	genhash = tcp_v4_md5_hash_skb(newhash,
1240 				      hash_expected,
1241 				      NULL, NULL, skb);
1242 
1243 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1244 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1245 				     &iph->saddr, ntohs(th->source),
1246 				     &iph->daddr, ntohs(th->dest),
1247 				     genhash ? " tcp_v4_calc_md5_hash failed"
1248 				     : "");
1249 		return true;
1250 	}
1251 	return false;
1252 }
1253 
1254 #endif
1255 
1256 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1257 	.family		=	PF_INET,
1258 	.obj_size	=	sizeof(struct tcp_request_sock),
1259 	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1260 	.send_ack	=	tcp_v4_reqsk_send_ack,
1261 	.destructor	=	tcp_v4_reqsk_destructor,
1262 	.send_reset	=	tcp_v4_send_reset,
1263 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
1264 };
1265 
1266 #ifdef CONFIG_TCP_MD5SIG
1267 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1268 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1269 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1270 };
1271 #endif
1272 
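/* Handle an incoming SYN on a listening socket: allocate and initialise a
 * request_sock, choose an initial sequence number (or a syncookie when the
 * queue is under pressure), and answer with a SYN-ACK.
 */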
1273 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1274 {
1275 	struct tcp_extend_values tmp_ext;
1276 	struct tcp_options_received tmp_opt;
1277 	const u8 *hash_location;
1278 	struct request_sock *req;
1279 	struct inet_request_sock *ireq;
1280 	struct tcp_sock *tp = tcp_sk(sk);
1281 	struct dst_entry *dst = NULL;
1282 	__be32 saddr = ip_hdr(skb)->saddr;
1283 	__be32 daddr = ip_hdr(skb)->daddr;
1284 	__u32 isn = TCP_SKB_CB(skb)->when;
1285 	bool want_cookie = false;
1286 
1287 	/* Never answer SYNs sent to broadcast or multicast addresses */
1288 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1289 		goto drop;
1290 
1291 	/* TW buckets are converted to open requests without
1292 	 * limitation: they conserve resources and the peer is
1293 	 * evidently a real one.
1294 	 */
1295 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1296 		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1297 		if (!want_cookie)
1298 			goto drop;
1299 	}
1300 
1301 	/* The accept backlog is full. If we have already queued enough
1302 	 * warm entries in the syn queue, drop the request. That is better
1303 	 * than clogging the syn queue with openreqs whose timeouts increase
1304 	 * exponentially.
1305 	 */
1306 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1307 		goto drop;
1308 
1309 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
1310 	if (!req)
1311 		goto drop;
1312 
1313 #ifdef CONFIG_TCP_MD5SIG
1314 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1315 #endif
1316 
1317 	tcp_clear_options(&tmp_opt);
1318 	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1319 	tmp_opt.user_mss  = tp->rx_opt.user_mss;
1320 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
1321 
1322 	if (tmp_opt.cookie_plus > 0 &&
1323 	    tmp_opt.saw_tstamp &&
1324 	    !tp->rx_opt.cookie_out_never &&
1325 	    (sysctl_tcp_cookie_size > 0 ||
1326 	     (tp->cookie_values != NULL &&
1327 	      tp->cookie_values->cookie_desired > 0))) {
1328 		u8 *c;
1329 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1330 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1331 
1332 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1333 			goto drop_and_release;
1334 
1335 		/* Secret recipe starts with IP addresses */
1336 		*mess++ ^= (__force u32)daddr;
1337 		*mess++ ^= (__force u32)saddr;
1338 
1339 		/* plus variable length Initiator Cookie */
1340 		c = (u8 *)mess;
1341 		while (l-- > 0)
1342 			*c++ ^= *hash_location++;
1343 
1344 		want_cookie = false;	/* not our kind of cookie */
1345 		tmp_ext.cookie_out_never = 0; /* false */
1346 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1347 	} else if (!tp->rx_opt.cookie_in_always) {
1348 		/* redundant indications, but ensure initialization. */
1349 		tmp_ext.cookie_out_never = 1; /* true */
1350 		tmp_ext.cookie_plus = 0;
1351 	} else {
1352 		goto drop_and_release;
1353 	}
1354 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1355 
1356 	if (want_cookie && !tmp_opt.saw_tstamp)
1357 		tcp_clear_options(&tmp_opt);
1358 
1359 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1360 	tcp_openreq_init(req, &tmp_opt, skb);
1361 
1362 	ireq = inet_rsk(req);
1363 	ireq->loc_addr = daddr;
1364 	ireq->rmt_addr = saddr;
1365 	ireq->no_srccheck = inet_sk(sk)->transparent;
1366 	ireq->opt = tcp_v4_save_options(sk, skb);
1367 
1368 	if (security_inet_conn_request(sk, skb, req))
1369 		goto drop_and_free;
1370 
1371 	if (!want_cookie || tmp_opt.tstamp_ok)
1372 		TCP_ECN_create_request(req, skb);
1373 
1374 	if (want_cookie) {
1375 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376 		req->cookie_ts = tmp_opt.tstamp_ok;
1377 	} else if (!isn) {
1378 		struct flowi4 fl4;
1379 
1380 		/* VJ's idea. We save the last timestamp seen
1381 		 * from the destination in the peer table when entering
1382 		 * TIME-WAIT state, and check against it before
1383 		 * accepting a new connection request.
1384 		 *
1385 		 * If "isn" is not zero, this request hit a live
1386 		 * timewait bucket, so all the necessary checks were
1387 		 * already made in the function processing the timewait state.
1388 		 */
1389 		if (tmp_opt.saw_tstamp &&
1390 		    tcp_death_row.sysctl_tw_recycle &&
1391 		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1392 		    fl4.daddr == saddr) {
1393 			if (!tcp_peer_is_proven(req, dst, true)) {
1394 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1395 				goto drop_and_release;
1396 			}
1397 		}
1398 		/* Kill the following clause, if you dislike this way. */
1399 		else if (!sysctl_tcp_syncookies &&
1400 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1401 			  (sysctl_max_syn_backlog >> 2)) &&
1402 			 !tcp_peer_is_proven(req, dst, false)) {
1403 			/* Without syncookies, the last quarter of the
1404 			 * backlog is filled only with destinations
1405 			 * proven to be alive.
1406 			 * It means that we keep communicating with
1407 			 * destinations that were already remembered
1408 			 * by the moment the synflood started.
1409 			 */
1410 			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1411 				       &saddr, ntohs(tcp_hdr(skb)->source));
1412 			goto drop_and_release;
1413 		}
1414 
1415 		isn = tcp_v4_init_sequence(skb);
1416 	}
1417 	tcp_rsk(req)->snt_isn = isn;
1418 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1419 
1420 	if (tcp_v4_send_synack(sk, dst, req,
1421 			       (struct request_values *)&tmp_ext,
1422 			       skb_get_queue_mapping(skb),
1423 			       want_cookie) ||
1424 	    want_cookie)
1425 		goto drop_and_free;
1426 
1427 	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1428 	return 0;
1429 
1430 drop_and_release:
1431 	dst_release(dst);
1432 drop_and_free:
1433 	reqsk_free(req);
1434 drop:
1435 	return 0;
1436 }
1437 EXPORT_SYMBOL(tcp_v4_conn_request);
1438 
1439 
1440 /*
1441  * The three way handshake has completed - we got a valid ACK -
1442  * now create the new socket.
1443  */
1444 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1445 				  struct request_sock *req,
1446 				  struct dst_entry *dst)
1447 {
1448 	struct inet_request_sock *ireq;
1449 	struct inet_sock *newinet;
1450 	struct tcp_sock *newtp;
1451 	struct sock *newsk;
1452 #ifdef CONFIG_TCP_MD5SIG
1453 	struct tcp_md5sig_key *key;
1454 #endif
1455 	struct ip_options_rcu *inet_opt;
1456 
1457 	if (sk_acceptq_is_full(sk))
1458 		goto exit_overflow;
1459 
1460 	newsk = tcp_create_openreq_child(sk, req, skb);
1461 	if (!newsk)
1462 		goto exit_nonewsk;
1463 
1464 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1465 
1466 	newtp		      = tcp_sk(newsk);
1467 	newinet		      = inet_sk(newsk);
1468 	ireq		      = inet_rsk(req);
1469 	newinet->inet_daddr   = ireq->rmt_addr;
1470 	newinet->inet_rcv_saddr = ireq->loc_addr;
1471 	newinet->inet_saddr	      = ireq->loc_addr;
1472 	inet_opt	      = ireq->opt;
1473 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1474 	ireq->opt	      = NULL;
1475 	newinet->mc_index     = inet_iif(skb);
1476 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1477 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1478 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1479 	if (inet_opt)
1480 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1481 	newinet->inet_id = newtp->write_seq ^ jiffies;
1482 
1483 	if (!dst) {
1484 		dst = inet_csk_route_child_sock(sk, newsk, req);
1485 		if (!dst)
1486 			goto put_and_exit;
1487 	} else {
1488 		/* syncookie case : see end of cookie_v4_check() */
1489 	}
1490 	sk_setup_caps(newsk, dst);
1491 
1492 	tcp_mtup_init(newsk);
1493 	tcp_sync_mss(newsk, dst_mtu(dst));
1494 	newtp->advmss = dst_metric_advmss(dst);
1495 	if (tcp_sk(sk)->rx_opt.user_mss &&
1496 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1497 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1498 
1499 	tcp_initialize_rcv_mss(newsk);
1500 	if (tcp_rsk(req)->snt_synack)
1501 		tcp_valid_rtt_meas(newsk,
1502 		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1503 	newtp->total_retrans = req->retrans;
1504 
1505 #ifdef CONFIG_TCP_MD5SIG
1506 	/* Copy over the MD5 key from the original socket */
1507 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1508 				AF_INET);
1509 	if (key != NULL) {
1510 		/*
1511 		 * We're using one, so create a matching key
1512 		 * on the newsk structure. If we fail to get
1513 		 * memory, then we end up not copying the key
1514 		 * across. Shucks.
1515 		 */
1516 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1517 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1518 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1519 	}
1520 #endif
1521 
1522 	if (__inet_inherit_port(sk, newsk) < 0)
1523 		goto put_and_exit;
1524 	__inet_hash_nolisten(newsk, NULL);
1525 
1526 	return newsk;
1527 
1528 exit_overflow:
1529 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1530 exit_nonewsk:
1531 	dst_release(dst);
1532 exit:
1533 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1534 	return NULL;
1535 put_and_exit:
1536 	tcp_clear_xmit_timers(newsk);
1537 	tcp_cleanup_congestion_control(newsk);
1538 	bh_unlock_sock(newsk);
1539 	sock_put(newsk);
1540 	goto exit;
1541 }
1542 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1543 
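/* For a segment arriving on a listening socket, look for a matching pending
 * connection request or an already established child socket; with syncookies
 * enabled, a bare ACK may also be validated against a previously sent cookie.
 */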
1544 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1545 {
1546 	struct tcphdr *th = tcp_hdr(skb);
1547 	const struct iphdr *iph = ip_hdr(skb);
1548 	struct sock *nsk;
1549 	struct request_sock **prev;
1550 	/* Find possible connection requests. */
1551 	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1552 						       iph->saddr, iph->daddr);
1553 	if (req)
1554 		return tcp_check_req(sk, skb, req, prev);
1555 
1556 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1557 			th->source, iph->daddr, th->dest, inet_iif(skb));
1558 
1559 	if (nsk) {
1560 		if (nsk->sk_state != TCP_TIME_WAIT) {
1561 			bh_lock_sock(nsk);
1562 			return nsk;
1563 		}
1564 		inet_twsk_put(inet_twsk(nsk));
1565 		return NULL;
1566 	}
1567 
1568 #ifdef CONFIG_SYN_COOKIES
1569 	if (!th->syn)
1570 		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1571 #endif
1572 	return sk;
1573 }
1574 
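/* Verify or prepare the checksum of an incoming segment.  Short packets are
 * checked immediately; for longer ones the pseudo-header sum is stored and
 * the full check is deferred until the data is actually used.
 */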
1575 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1576 {
1577 	const struct iphdr *iph = ip_hdr(skb);
1578 
1579 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1580 		if (!tcp_v4_check(skb->len, iph->saddr,
1581 				  iph->daddr, skb->csum)) {
1582 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 			return 0;
1584 		}
1585 	}
1586 
1587 	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1588 				       skb->len, IPPROTO_TCP, 0);
1589 
1590 	if (skb->len <= 76) {
1591 		return __skb_checksum_complete(skb);
1592 	}
1593 	return 0;
1594 }
1595 
1596 
1597 /* The socket must have its spinlock held when we get
1598  * here.
1599  *
1600  * We have a potential double-lock case here, so even when
1601  * doing backlog processing we use the BH locking scheme.
1602  * This is because we cannot sleep with the original spinlock
1603  * held.
1604  */
1605 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1606 {
1607 	struct sock *rsk;
1608 #ifdef CONFIG_TCP_MD5SIG
1609 	/*
1610 	 * We really want to reject the packet as early as possible
1611 	 * if:
1612 	 *  o We're expecting an MD5'd packet and there is no MD5 TCP option
1613 	 *  o There is an MD5 option and we're not expecting one
1614 	 */
1615 	if (tcp_v4_inbound_md5_hash(sk, skb))
1616 		goto discard;
1617 #endif
1618 
1619 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1620 		sock_rps_save_rxhash(sk, skb);
1621 		if (sk->sk_rx_dst) {
1622 			struct dst_entry *dst = sk->sk_rx_dst;
1623 			if (dst->ops->check(dst, 0) == NULL) {
1624 				dst_release(dst);
1625 				sk->sk_rx_dst = NULL;
1626 			}
1627 		}
1628 		if (unlikely(sk->sk_rx_dst == NULL)) {
1629 			struct inet_sock *icsk = inet_sk(sk);
1630 			struct rtable *rt = skb_rtable(skb);
1631 
1632 			sk->sk_rx_dst = dst_clone(&rt->dst);
1633 			icsk->rx_dst_ifindex = inet_iif(skb);
1634 		}
1635 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1636 			rsk = sk;
1637 			goto reset;
1638 		}
1639 		return 0;
1640 	}
1641 
1642 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1643 		goto csum_err;
1644 
1645 	if (sk->sk_state == TCP_LISTEN) {
1646 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1647 		if (!nsk)
1648 			goto discard;
1649 
1650 		if (nsk != sk) {
1651 			sock_rps_save_rxhash(nsk, skb);
1652 			if (tcp_child_process(sk, nsk, skb)) {
1653 				rsk = nsk;
1654 				goto reset;
1655 			}
1656 			return 0;
1657 		}
1658 	} else
1659 		sock_rps_save_rxhash(sk, skb);
1660 
1661 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1662 		rsk = sk;
1663 		goto reset;
1664 	}
1665 	return 0;
1666 
1667 reset:
1668 	tcp_v4_send_reset(rsk, skb);
1669 discard:
1670 	kfree_skb(skb);
1671 	/* Be careful here. If this function gets more complicated and
1672 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1673 	 * might be destroyed here. This current version compiles correctly,
1674 	 * but you have been warned.
1675 	 */
1676 	return 0;
1677 
1678 csum_err:
1679 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1680 	goto discard;
1681 }
1682 EXPORT_SYMBOL(tcp_v4_do_rcv);
1683 
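/* Early demux: look up the established socket for this segment before the
 * routing decision, so the socket's cached input route can be reused and a
 * full route lookup avoided on the fast path.
 */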
1684 void tcp_v4_early_demux(struct sk_buff *skb)
1685 {
1686 	struct net *net = dev_net(skb->dev);
1687 	const struct iphdr *iph;
1688 	const struct tcphdr *th;
1689 	struct net_device *dev;
1690 	struct sock *sk;
1691 
1692 	if (skb->pkt_type != PACKET_HOST)
1693 		return;
1694 
1695 	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1696 		return;
1697 
1698 	iph = ip_hdr(skb);
1699 	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1700 
1701 	if (th->doff < sizeof(struct tcphdr) / 4)
1702 		return;
1703 
1704 	if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
1705 		return;
1706 
1707 	dev = skb->dev;
1708 	sk = __inet_lookup_established(net, &tcp_hashinfo,
1709 				       iph->saddr, th->source,
1710 				       iph->daddr, ntohs(th->dest),
1711 				       dev->ifindex);
1712 	if (sk) {
1713 		skb->sk = sk;
1714 		skb->destructor = sock_edemux;
1715 		if (sk->sk_state != TCP_TIME_WAIT) {
1716 			struct dst_entry *dst = sk->sk_rx_dst;
1717 			struct inet_sock *icsk = inet_sk(sk);
1718 			if (dst)
1719 				dst = dst_check(dst, 0);
1720 			if (dst &&
1721 			    icsk->rx_dst_ifindex == dev->ifindex)
1722 				skb_dst_set_noref(skb, dst);
1723 		}
1724 	}
1725 }
1726 
1727 /*
1728  *	From tcp_input.c
1729  */
1730 
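/* Main IPv4 receive entry point for TCP.  Validates the header and checksum,
 * looks up the owning socket, and either processes the segment directly,
 * prequeues it, or places it on the socket backlog when the socket is owned
 * by the user.
 */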
1731 int tcp_v4_rcv(struct sk_buff *skb)
1732 {
1733 	const struct iphdr *iph;
1734 	const struct tcphdr *th;
1735 	struct sock *sk;
1736 	int ret;
1737 	struct net *net = dev_net(skb->dev);
1738 
1739 	if (skb->pkt_type != PACKET_HOST)
1740 		goto discard_it;
1741 
1742 	/* Count it even if it's bad */
1743 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1744 
1745 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1746 		goto discard_it;
1747 
1748 	th = tcp_hdr(skb);
1749 
1750 	if (th->doff < sizeof(struct tcphdr) / 4)
1751 		goto bad_packet;
1752 	if (!pskb_may_pull(skb, th->doff * 4))
1753 		goto discard_it;
1754 
1755 	/* An explanation is required here, I think.
1756 	 * Packet length and doff are validated by header prediction,
1757 	 * provided the case of th->doff == 0 is eliminated.
1758 	 * So, we defer the checks. */
1759 	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1760 		goto bad_packet;
1761 
1762 	th = tcp_hdr(skb);
1763 	iph = ip_hdr(skb);
1764 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1765 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1766 				    skb->len - th->doff * 4);
1767 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1768 	TCP_SKB_CB(skb)->when	 = 0;
1769 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1770 	TCP_SKB_CB(skb)->sacked	 = 0;
1771 
1772 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1773 	if (!sk)
1774 		goto no_tcp_socket;
1775 
1776 process:
1777 	if (sk->sk_state == TCP_TIME_WAIT)
1778 		goto do_time_wait;
1779 
1780 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1781 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1782 		goto discard_and_relse;
1783 	}
1784 
1785 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1786 		goto discard_and_relse;
1787 	nf_reset(skb);
1788 
1789 	if (sk_filter(sk, skb))
1790 		goto discard_and_relse;
1791 
1792 	skb->dev = NULL;
1793 
1794 	bh_lock_sock_nested(sk);
1795 	ret = 0;
1796 	if (!sock_owned_by_user(sk)) {
1797 #ifdef CONFIG_NET_DMA
1798 		struct tcp_sock *tp = tcp_sk(sk);
1799 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1800 			tp->ucopy.dma_chan = net_dma_find_channel();
1801 		if (tp->ucopy.dma_chan)
1802 			ret = tcp_v4_do_rcv(sk, skb);
1803 		else
1804 #endif
1805 		{
1806 			if (!tcp_prequeue(sk, skb))
1807 				ret = tcp_v4_do_rcv(sk, skb);
1808 		}
1809 	} else if (unlikely(sk_add_backlog(sk, skb,
1810 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1811 		bh_unlock_sock(sk);
1812 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1813 		goto discard_and_relse;
1814 	}
1815 	bh_unlock_sock(sk);
1816 
1817 	sock_put(sk);
1818 
1819 	return ret;
1820 
1821 no_tcp_socket:
1822 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1823 		goto discard_it;
1824 
1825 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1826 bad_packet:
1827 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1828 	} else {
1829 		tcp_v4_send_reset(NULL, skb);
1830 	}
1831 
1832 discard_it:
1833 	/* Discard frame. */
1834 	kfree_skb(skb);
1835 	return 0;
1836 
1837 discard_and_relse:
1838 	sock_put(sk);
1839 	goto discard_it;
1840 
1841 do_time_wait:
1842 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1843 		inet_twsk_put(inet_twsk(sk));
1844 		goto discard_it;
1845 	}
1846 
1847 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1848 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1849 		inet_twsk_put(inet_twsk(sk));
1850 		goto discard_it;
1851 	}
1852 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1853 	case TCP_TW_SYN: {
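		/* A new SYN hit a TIME_WAIT socket.  If a listener is bound to
		 * the same destination address and port, recycle the timewait
		 * entry and let the listener handle the SYN as a fresh
		 * connection request.
		 */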
1854 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1855 							&tcp_hashinfo,
1856 							iph->daddr, th->dest,
1857 							inet_iif(skb));
1858 		if (sk2) {
1859 			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1860 			inet_twsk_put(inet_twsk(sk));
1861 			sk = sk2;
1862 			goto process;
1863 		}
1864 		/* Fall through to ACK */
1865 	}
1866 	case TCP_TW_ACK:
1867 		tcp_v4_timewait_ack(sk, skb);
1868 		break;
1869 	case TCP_TW_RST:
1870 		goto no_tcp_socket;
1871 	case TCP_TW_SUCCESS:;
1872 	}
1873 	goto discard_it;
1874 }
1875 
1876 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1877 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1878 	.twsk_unique	= tcp_twsk_unique,
1879 	.twsk_destructor= tcp_twsk_destructor,
1880 };
1881 
1882 const struct inet_connection_sock_af_ops ipv4_specific = {
1883 	.queue_xmit	   = ip_queue_xmit,
1884 	.send_check	   = tcp_v4_send_check,
1885 	.rebuild_header	   = inet_sk_rebuild_header,
1886 	.conn_request	   = tcp_v4_conn_request,
1887 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1888 	.net_header_len	   = sizeof(struct iphdr),
1889 	.setsockopt	   = ip_setsockopt,
1890 	.getsockopt	   = ip_getsockopt,
1891 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1892 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1893 	.bind_conflict	   = inet_csk_bind_conflict,
1894 #ifdef CONFIG_COMPAT
1895 	.compat_setsockopt = compat_ip_setsockopt,
1896 	.compat_getsockopt = compat_ip_getsockopt,
1897 #endif
1898 };
1899 EXPORT_SYMBOL(ipv4_specific);
1900 
1901 #ifdef CONFIG_TCP_MD5SIG
1902 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1903 	.md5_lookup		= tcp_v4_md5_lookup,
1904 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1905 	.md5_parse		= tcp_v4_parse_md5_keys,
1906 };
1907 #endif
1908 
1909 /* NOTE: Many fields are already set to zero by the call to sk_alloc(),
1910  *       so they need not be initialized here.
1911  */
1912 static int tcp_v4_init_sock(struct sock *sk)
1913 {
1914 	struct inet_connection_sock *icsk = inet_csk(sk);
1915 
1916 	tcp_init_sock(sk);
1917 
1918 	icsk->icsk_af_ops = &ipv4_specific;
1919 
1920 #ifdef CONFIG_TCP_MD5SIG
1921 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1922 #endif
1923 
1924 	return 0;
1925 }
1926 
1927 void tcp_v4_destroy_sock(struct sock *sk)
1928 {
1929 	struct tcp_sock *tp = tcp_sk(sk);
1930 
1931 	tcp_clear_xmit_timers(sk);
1932 
1933 	tcp_cleanup_congestion_control(sk);
1934 
1935 	/* Clean up the write buffer. */
1936 	tcp_write_queue_purge(sk);
1937 
1938 	/* Clean up our (hopefully empty) out_of_order_queue. */
1939 	__skb_queue_purge(&tp->out_of_order_queue);
1940 
1941 #ifdef CONFIG_TCP_MD5SIG
1942 	/* Clean up the MD5 key list, if any */
1943 	if (tp->md5sig_info) {
1944 		tcp_clear_md5_list(sk);
1945 		kfree_rcu(tp->md5sig_info, rcu);
1946 		tp->md5sig_info = NULL;
1947 	}
1948 #endif
1949 
1950 #ifdef CONFIG_NET_DMA
1951 	/* Clean up our sk_async_wait_queue */
1952 	__skb_queue_purge(&sk->sk_async_wait_queue);
1953 #endif
1954 
1955 	/* Clean up the prequeue; it should already be empty. */
1956 	__skb_queue_purge(&tp->ucopy.prequeue);
1957 
1958 	/* Clean up a referenced TCP bind bucket. */
1959 	if (inet_csk(sk)->icsk_bind_hash)
1960 		inet_put_port(sk);
1961 
1962 	/*
1963 	 * If a cached sendmsg page exists, toss it.
1964 	 */
1965 	if (sk->sk_sndmsg_page) {
1966 		__free_page(sk->sk_sndmsg_page);
1967 		sk->sk_sndmsg_page = NULL;
1968 	}
1969 
1970 	/* TCP Cookie Transactions */
1971 	if (tp->cookie_values != NULL) {
1972 		kref_put(&tp->cookie_values->kref,
1973 			 tcp_cookie_values_release);
1974 		tp->cookie_values = NULL;
1975 	}
1976 
1977 	/* If the socket was aborted during a connect operation */
1978 	tcp_free_fastopen_req(tp);
1979 
1980 	sk_sockets_allocated_dec(sk);
1981 	sock_release_memcg(sk);
1982 }
1983 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1984 
1985 #ifdef CONFIG_PROC_FS
1986 /* Proc filesystem TCP sock list dumping. */
1987 
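/* Helpers for walking the nulls-terminated TIME_WAIT chain of an ehash bucket. */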
1988 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1989 {
1990 	return hlist_nulls_empty(head) ? NULL :
1991 		list_entry(head->first, struct inet_timewait_sock, tw_node);
1992 }
1993 
1994 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1995 {
1996 	return !is_a_nulls(tw->tw_node.next) ?
1997 		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1998 }
1999 
2000 /*
2001  * Get the next listener socket following cur.  If cur is NULL, get the first
2002  * socket starting from the bucket given in st->bucket; when st->bucket is
2003  * zero, the very first socket in the hash table is returned.
2004  */
2005 static void *listening_get_next(struct seq_file *seq, void *cur)
2006 {
2007 	struct inet_connection_sock *icsk;
2008 	struct hlist_nulls_node *node;
2009 	struct sock *sk = cur;
2010 	struct inet_listen_hashbucket *ilb;
2011 	struct tcp_iter_state *st = seq->private;
2012 	struct net *net = seq_file_net(seq);
2013 
2014 	if (!sk) {
2015 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2016 		spin_lock_bh(&ilb->lock);
2017 		sk = sk_nulls_head(&ilb->head);
2018 		st->offset = 0;
2019 		goto get_sk;
2020 	}
2021 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
2022 	++st->num;
2023 	++st->offset;
2024 
2025 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
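		/* Resume inside the listener's SYN table: finish walking the
		 * current request chain, then later syn_table buckets, and
		 * fall back to the next listening socket once the table is
		 * exhausted.
		 */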
2026 		struct request_sock *req = cur;
2027 
2028 		icsk = inet_csk(st->syn_wait_sk);
2029 		req = req->dl_next;
2030 		while (1) {
2031 			while (req) {
2032 				if (req->rsk_ops->family == st->family) {
2033 					cur = req;
2034 					goto out;
2035 				}
2036 				req = req->dl_next;
2037 			}
2038 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2039 				break;
2040 get_req:
2041 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2042 		}
2043 		sk	  = sk_nulls_next(st->syn_wait_sk);
2044 		st->state = TCP_SEQ_STATE_LISTENING;
2045 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2046 	} else {
2047 		icsk = inet_csk(sk);
2048 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2049 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
2050 			goto start_req;
2051 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2052 		sk = sk_nulls_next(sk);
2053 	}
2054 get_sk:
2055 	sk_nulls_for_each_from(sk, node) {
2056 		if (!net_eq(sock_net(sk), net))
2057 			continue;
2058 		if (sk->sk_family == st->family) {
2059 			cur = sk;
2060 			goto out;
2061 		}
2062 		icsk = inet_csk(sk);
2063 		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2064 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2065 start_req:
2066 			st->uid		= sock_i_uid(sk);
2067 			st->syn_wait_sk = sk;
2068 			st->state	= TCP_SEQ_STATE_OPENREQ;
2069 			st->sbucket	= 0;
2070 			goto get_req;
2071 		}
2072 		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2073 	}
2074 	spin_unlock_bh(&ilb->lock);
2075 	st->offset = 0;
2076 	if (++st->bucket < INET_LHTABLE_SIZE) {
2077 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2078 		spin_lock_bh(&ilb->lock);
2079 		sk = sk_nulls_head(&ilb->head);
2080 		goto get_sk;
2081 	}
2082 	cur = NULL;
2083 out:
2084 	return cur;
2085 }
2086 
2087 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2088 {
2089 	struct tcp_iter_state *st = seq->private;
2090 	void *rc;
2091 
2092 	st->bucket = 0;
2093 	st->offset = 0;
2094 	rc = listening_get_next(seq, NULL);
2095 
2096 	while (rc && *pos) {
2097 		rc = listening_get_next(seq, rc);
2098 		--*pos;
2099 	}
2100 	return rc;
2101 }
2102 
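/* True when the bucket's established chain and TIME_WAIT chain are both empty. */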
2103 static inline bool empty_bucket(struct tcp_iter_state *st)
2104 {
2105 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2106 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2107 }
2108 
2109 /*
2110  * Get first established socket starting from bucket given in st->bucket.
2111  * If st->bucket is zero, the very first socket in the hash is returned.
2112  */
2113 static void *established_get_first(struct seq_file *seq)
2114 {
2115 	struct tcp_iter_state *st = seq->private;
2116 	struct net *net = seq_file_net(seq);
2117 	void *rc = NULL;
2118 
2119 	st->offset = 0;
2120 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2121 		struct sock *sk;
2122 		struct hlist_nulls_node *node;
2123 		struct inet_timewait_sock *tw;
2124 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2125 
2126 		/* Lockless fast path for the common case of empty buckets */
2127 		if (empty_bucket(st))
2128 			continue;
2129 
2130 		spin_lock_bh(lock);
2131 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2132 			if (sk->sk_family != st->family ||
2133 			    !net_eq(sock_net(sk), net)) {
2134 				continue;
2135 			}
2136 			rc = sk;
2137 			goto out;
2138 		}
2139 		st->state = TCP_SEQ_STATE_TIME_WAIT;
2140 		inet_twsk_for_each(tw, node,
2141 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
2142 			if (tw->tw_family != st->family ||
2143 			    !net_eq(twsk_net(tw), net)) {
2144 				continue;
2145 			}
2146 			rc = tw;
2147 			goto out;
2148 		}
2149 		spin_unlock_bh(lock);
2150 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2151 	}
2152 out:
2153 	return rc;
2154 }
2155 
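/*
 * Get the established or TIME_WAIT socket that follows cur, moving on to
 * later hash buckets once the current chains are exhausted.
 */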
2156 static void *established_get_next(struct seq_file *seq, void *cur)
2157 {
2158 	struct sock *sk = cur;
2159 	struct inet_timewait_sock *tw;
2160 	struct hlist_nulls_node *node;
2161 	struct tcp_iter_state *st = seq->private;
2162 	struct net *net = seq_file_net(seq);
2163 
2164 	++st->num;
2165 	++st->offset;
2166 
2167 	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2168 		tw = cur;
2169 		tw = tw_next(tw);
2170 get_tw:
2171 		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2172 			tw = tw_next(tw);
2173 		}
2174 		if (tw) {
2175 			cur = tw;
2176 			goto out;
2177 		}
2178 		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2179 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2180 
2181 		/* Look for the next non-empty bucket */
2182 		st->offset = 0;
2183 		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2184 				empty_bucket(st))
2185 			;
2186 		if (st->bucket > tcp_hashinfo.ehash_mask)
2187 			return NULL;
2188 
2189 		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2190 		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2191 	} else
2192 		sk = sk_nulls_next(sk);
2193 
2194 	sk_nulls_for_each_from(sk, node) {
2195 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2196 			goto found;
2197 	}
2198 
2199 	st->state = TCP_SEQ_STATE_TIME_WAIT;
2200 	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2201 	goto get_tw;
2202 found:
2203 	cur = sk;
2204 out:
2205 	return cur;
2206 }
2207 
2208 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2209 {
2210 	struct tcp_iter_state *st = seq->private;
2211 	void *rc;
2212 
2213 	st->bucket = 0;
2214 	rc = established_get_first(seq);
2215 
2216 	while (rc && pos) {
2217 		rc = established_get_next(seq, rc);
2218 		--pos;
2219 	}
2220 	return rc;
2221 }
2222 
2223 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2224 {
2225 	void *rc;
2226 	struct tcp_iter_state *st = seq->private;
2227 
2228 	st->state = TCP_SEQ_STATE_LISTENING;
2229 	rc	  = listening_get_idx(seq, &pos);
2230 
2231 	if (!rc) {
2232 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2233 		rc	  = established_get_idx(seq, pos);
2234 	}
2235 
2236 	return rc;
2237 }
2238 
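/*
 * Resume iteration at the state/bucket/offset remembered from the previous
 * read, so large /proc dumps do not rescan the whole hash from the start.
 */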
2239 static void *tcp_seek_last_pos(struct seq_file *seq)
2240 {
2241 	struct tcp_iter_state *st = seq->private;
2242 	int offset = st->offset;
2243 	int orig_num = st->num;
2244 	void *rc = NULL;
2245 
2246 	switch (st->state) {
2247 	case TCP_SEQ_STATE_OPENREQ:
2248 	case TCP_SEQ_STATE_LISTENING:
2249 		if (st->bucket >= INET_LHTABLE_SIZE)
2250 			break;
2251 		st->state = TCP_SEQ_STATE_LISTENING;
2252 		rc = listening_get_next(seq, NULL);
2253 		while (offset-- && rc)
2254 			rc = listening_get_next(seq, rc);
2255 		if (rc)
2256 			break;
2257 		st->bucket = 0;
2258 		/* Fallthrough */
2259 	case TCP_SEQ_STATE_ESTABLISHED:
2260 	case TCP_SEQ_STATE_TIME_WAIT:
2261 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2262 		if (st->bucket > tcp_hashinfo.ehash_mask)
2263 			break;
2264 		rc = established_get_first(seq);
2265 		while (offset-- && rc)
2266 			rc = established_get_next(seq, rc);
2267 	}
2268 
2269 	st->num = orig_num;
2270 
2271 	return rc;
2272 }
2273 
2274 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2275 {
2276 	struct tcp_iter_state *st = seq->private;
2277 	void *rc;
2278 
2279 	if (*pos && *pos == st->last_pos) {
2280 		rc = tcp_seek_last_pos(seq);
2281 		if (rc)
2282 			goto out;
2283 	}
2284 
2285 	st->state = TCP_SEQ_STATE_LISTENING;
2286 	st->num = 0;
2287 	st->bucket = 0;
2288 	st->offset = 0;
2289 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2290 
2291 out:
2292 	st->last_pos = *pos;
2293 	return rc;
2294 }
2295 
2296 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2297 {
2298 	struct tcp_iter_state *st = seq->private;
2299 	void *rc = NULL;
2300 
2301 	if (v == SEQ_START_TOKEN) {
2302 		rc = tcp_get_idx(seq, 0);
2303 		goto out;
2304 	}
2305 
2306 	switch (st->state) {
2307 	case TCP_SEQ_STATE_OPENREQ:
2308 	case TCP_SEQ_STATE_LISTENING:
2309 		rc = listening_get_next(seq, v);
2310 		if (!rc) {
2311 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2312 			st->bucket = 0;
2313 			st->offset = 0;
2314 			rc	  = established_get_first(seq);
2315 		}
2316 		break;
2317 	case TCP_SEQ_STATE_ESTABLISHED:
2318 	case TCP_SEQ_STATE_TIME_WAIT:
2319 		rc = established_get_next(seq, v);
2320 		break;
2321 	}
2322 out:
2323 	++*pos;
2324 	st->last_pos = *pos;
2325 	return rc;
2326 }
2327 
2328 static void tcp_seq_stop(struct seq_file *seq, void *v)
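/* Drop whichever lock the iterator is still holding for its current state. */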
2329 {
2330 	struct tcp_iter_state *st = seq->private;
2331 
2332 	switch (st->state) {
2333 	case TCP_SEQ_STATE_OPENREQ:
2334 		if (v) {
2335 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2336 			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2337 		}
2338 	case TCP_SEQ_STATE_LISTENING:
2339 		if (v != SEQ_START_TOKEN)
2340 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2341 		break;
2342 	case TCP_SEQ_STATE_TIME_WAIT:
2343 	case TCP_SEQ_STATE_ESTABLISHED:
2344 		if (v)
2345 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2346 		break;
2347 	}
2348 }
2349 
2350 int tcp_seq_open(struct inode *inode, struct file *file)
2351 {
2352 	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2353 	struct tcp_iter_state *s;
2354 	int err;
2355 
2356 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2357 			  sizeof(struct tcp_iter_state));
2358 	if (err < 0)
2359 		return err;
2360 
2361 	s = ((struct seq_file *)file->private_data)->private;
2362 	s->family		= afinfo->family;
2363 	s->last_pos 		= 0;
2364 	return 0;
2365 }
2366 EXPORT_SYMBOL(tcp_seq_open);
2367 
2368 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2369 {
2370 	int rc = 0;
2371 	struct proc_dir_entry *p;
2372 
2373 	afinfo->seq_ops.start		= tcp_seq_start;
2374 	afinfo->seq_ops.next		= tcp_seq_next;
2375 	afinfo->seq_ops.stop		= tcp_seq_stop;
2376 
2377 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2378 			     afinfo->seq_fops, afinfo);
2379 	if (!p)
2380 		rc = -ENOMEM;
2381 	return rc;
2382 }
2383 EXPORT_SYMBOL(tcp_proc_register);
2384 
2385 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2386 {
2387 	proc_net_remove(net, afinfo->name);
2388 }
2389 EXPORT_SYMBOL(tcp_proc_unregister);
2390 
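/*
 * Each helper below formats one row of /proc/net/tcp.  Purely for
 * illustration (all values made up), an established socket row looks
 * roughly like:
 *
 *   2: 0100007F:0016 0100007F:8A24 01 00000000:00000000 02:000A3D4E 00000000  1000        0 12345 1 ffff880012345678 21 4 31 10 -1
 *
 * Addresses are raw __be32 values printed with %08X, so 127.0.0.1 appears as
 * 0100007F on a little-endian host; ports and the state are in hex as well.
 */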
2391 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2392 			 struct seq_file *f, int i, int uid, int *len)
2393 {
2394 	const struct inet_request_sock *ireq = inet_rsk(req);
2395 	int ttd = req->expires - jiffies;
2396 
2397 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2398 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2399 		i,
2400 		ireq->loc_addr,
2401 		ntohs(inet_sk(sk)->inet_sport),
2402 		ireq->rmt_addr,
2403 		ntohs(ireq->rmt_port),
2404 		TCP_SYN_RECV,
2405 		0, 0, /* could print option size, but that is af dependent. */
2406 		1,    /* timers active (only the expire timer) */
2407 		jiffies_to_clock_t(ttd),
2408 		req->retrans,
2409 		uid,
2410 		0,  /* non standard timer */
2411 		0, /* open_requests have no inode */
2412 		atomic_read(&sk->sk_refcnt),
2413 		req,
2414 		len);
2415 }
2416 
2417 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2418 {
2419 	int timer_active;
2420 	unsigned long timer_expires;
2421 	const struct tcp_sock *tp = tcp_sk(sk);
2422 	const struct inet_connection_sock *icsk = inet_csk(sk);
2423 	const struct inet_sock *inet = inet_sk(sk);
2424 	__be32 dest = inet->inet_daddr;
2425 	__be32 src = inet->inet_rcv_saddr;
2426 	__u16 destp = ntohs(inet->inet_dport);
2427 	__u16 srcp = ntohs(inet->inet_sport);
2428 	int rx_queue;
2429 
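	/* Timer code reported in the "tr" column: 1 retransmit timer,
	 * 2 keepalive timer (sk_timer), 4 zero window probe timer,
	 * 0 no timer pending.  (3 is used for TIME_WAIT sockets in
	 * get_timewait4_sock().)
	 */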
2430 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2431 		timer_active	= 1;
2432 		timer_expires	= icsk->icsk_timeout;
2433 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2434 		timer_active	= 4;
2435 		timer_expires	= icsk->icsk_timeout;
2436 	} else if (timer_pending(&sk->sk_timer)) {
2437 		timer_active	= 2;
2438 		timer_expires	= sk->sk_timer.expires;
2439 	} else {
2440 		timer_active	= 0;
2441 		timer_expires = jiffies;
2442 	}
2443 
2444 	if (sk->sk_state == TCP_LISTEN)
2445 		rx_queue = sk->sk_ack_backlog;
2446 	else
2447 		/*
2448 		 * Because we don't lock the socket, we might find a transient negative value.
2449 		 */
2450 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2451 
2452 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2453 			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2454 		i, src, srcp, dest, destp, sk->sk_state,
2455 		tp->write_seq - tp->snd_una,
2456 		rx_queue,
2457 		timer_active,
2458 		jiffies_to_clock_t(timer_expires - jiffies),
2459 		icsk->icsk_retransmits,
2460 		sock_i_uid(sk),
2461 		icsk->icsk_probes_out,
2462 		sock_i_ino(sk),
2463 		atomic_read(&sk->sk_refcnt), sk,
2464 		jiffies_to_clock_t(icsk->icsk_rto),
2465 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2466 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2467 		tp->snd_cwnd,
2468 		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2469 		len);
2470 }
2471 
2472 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2473 			       struct seq_file *f, int i, int *len)
2474 {
2475 	__be32 dest, src;
2476 	__u16 destp, srcp;
2477 	int ttd = tw->tw_ttd - jiffies;
2478 
2479 	if (ttd < 0)
2480 		ttd = 0;
2481 
2482 	dest  = tw->tw_daddr;
2483 	src   = tw->tw_rcv_saddr;
2484 	destp = ntohs(tw->tw_dport);
2485 	srcp  = ntohs(tw->tw_sport);
2486 
2487 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2488 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2489 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2490 		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2491 		atomic_read(&tw->tw_refcnt), tw, len);
2492 }
2493 
2494 #define TMPSZ 150
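/* Fixed width of one /proc/net/tcp row; tcp4_seq_show() pads each row with
 * spaces up to TMPSZ - 1 characters.
 */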
2495 
2496 static int tcp4_seq_show(struct seq_file *seq, void *v)
2497 {
2498 	struct tcp_iter_state *st;
2499 	int len;
2500 
2501 	if (v == SEQ_START_TOKEN) {
2502 		seq_printf(seq, "%-*s\n", TMPSZ - 1,
2503 			   "  sl  local_address rem_address   st tx_queue "
2504 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2505 			   "inode");
2506 		goto out;
2507 	}
2508 	st = seq->private;
2509 
2510 	switch (st->state) {
2511 	case TCP_SEQ_STATE_LISTENING:
2512 	case TCP_SEQ_STATE_ESTABLISHED:
2513 		get_tcp4_sock(v, seq, st->num, &len);
2514 		break;
2515 	case TCP_SEQ_STATE_OPENREQ:
2516 		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2517 		break;
2518 	case TCP_SEQ_STATE_TIME_WAIT:
2519 		get_timewait4_sock(v, seq, st->num, &len);
2520 		break;
2521 	}
2522 	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2523 out:
2524 	return 0;
2525 }
2526 
2527 static const struct file_operations tcp_afinfo_seq_fops = {
2528 	.owner   = THIS_MODULE,
2529 	.open    = tcp_seq_open,
2530 	.read    = seq_read,
2531 	.llseek  = seq_lseek,
2532 	.release = seq_release_net
2533 };
2534 
2535 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2536 	.name		= "tcp",
2537 	.family		= AF_INET,
2538 	.seq_fops	= &tcp_afinfo_seq_fops,
2539 	.seq_ops	= {
2540 		.show		= tcp4_seq_show,
2541 	},
2542 };
2543 
2544 static int __net_init tcp4_proc_init_net(struct net *net)
2545 {
2546 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2547 }
2548 
2549 static void __net_exit tcp4_proc_exit_net(struct net *net)
2550 {
2551 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2552 }
2553 
2554 static struct pernet_operations tcp4_net_ops = {
2555 	.init = tcp4_proc_init_net,
2556 	.exit = tcp4_proc_exit_net,
2557 };
2558 
2559 int __init tcp4_proc_init(void)
2560 {
2561 	return register_pernet_subsys(&tcp4_net_ops);
2562 }
2563 
2564 void tcp4_proc_exit(void)
2565 {
2566 	unregister_pernet_subsys(&tcp4_net_ops);
2567 }
2568 #endif /* CONFIG_PROC_FS */
2569 
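/* GRO receive hook: verify the TCP pseudo-header checksum when the device
 * reported CHECKSUM_COMPLETE, then let the generic TCP GRO code try to
 * coalesce this segment with those already held on @head.
 */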
2570 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2571 {
2572 	const struct iphdr *iph = skb_gro_network_header(skb);
2573 
2574 	switch (skb->ip_summed) {
2575 	case CHECKSUM_COMPLETE:
2576 		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2577 				  skb->csum)) {
2578 			skb->ip_summed = CHECKSUM_UNNECESSARY;
2579 			break;
2580 		}
2581 
2582 		/* fall through */
2583 	case CHECKSUM_NONE:
2584 		NAPI_GRO_CB(skb)->flush = 1;
2585 		return NULL;
2586 	}
2587 
2588 	return tcp_gro_receive(head, skb);
2589 }
2590 
2591 int tcp4_gro_complete(struct sk_buff *skb)
2592 {
2593 	const struct iphdr *iph = ip_hdr(skb);
2594 	struct tcphdr *th = tcp_hdr(skb);
2595 
2596 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2597 				  iph->saddr, iph->daddr, 0);
2598 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2599 
2600 	return tcp_gro_complete(skb);
2601 }
2602 
2603 struct proto tcp_prot = {
2604 	.name			= "TCP",
2605 	.owner			= THIS_MODULE,
2606 	.close			= tcp_close,
2607 	.connect		= tcp_v4_connect,
2608 	.disconnect		= tcp_disconnect,
2609 	.accept			= inet_csk_accept,
2610 	.ioctl			= tcp_ioctl,
2611 	.init			= tcp_v4_init_sock,
2612 	.destroy		= tcp_v4_destroy_sock,
2613 	.shutdown		= tcp_shutdown,
2614 	.setsockopt		= tcp_setsockopt,
2615 	.getsockopt		= tcp_getsockopt,
2616 	.recvmsg		= tcp_recvmsg,
2617 	.sendmsg		= tcp_sendmsg,
2618 	.sendpage		= tcp_sendpage,
2619 	.backlog_rcv		= tcp_v4_do_rcv,
2620 	.release_cb		= tcp_release_cb,
2621 	.mtu_reduced		= tcp_v4_mtu_reduced,
2622 	.hash			= inet_hash,
2623 	.unhash			= inet_unhash,
2624 	.get_port		= inet_csk_get_port,
2625 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2626 	.sockets_allocated	= &tcp_sockets_allocated,
2627 	.orphan_count		= &tcp_orphan_count,
2628 	.memory_allocated	= &tcp_memory_allocated,
2629 	.memory_pressure	= &tcp_memory_pressure,
2630 	.sysctl_wmem		= sysctl_tcp_wmem,
2631 	.sysctl_rmem		= sysctl_tcp_rmem,
2632 	.max_header		= MAX_TCP_HEADER,
2633 	.obj_size		= sizeof(struct tcp_sock),
2634 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2635 	.twsk_prot		= &tcp_timewait_sock_ops,
2636 	.rsk_prot		= &tcp_request_sock_ops,
2637 	.h.hashinfo		= &tcp_hashinfo,
2638 	.no_autobind		= true,
2639 #ifdef CONFIG_COMPAT
2640 	.compat_setsockopt	= compat_tcp_setsockopt,
2641 	.compat_getsockopt	= compat_tcp_getsockopt,
2642 #endif
2643 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2644 	.init_cgroup		= tcp_init_cgroup,
2645 	.destroy_cgroup		= tcp_destroy_cgroup,
2646 	.proto_cgroup		= tcp_proto_cgroup,
2647 #endif
2648 };
2649 EXPORT_SYMBOL(tcp_prot);
2650 
2651 static int __net_init tcp_sk_init(struct net *net)
2652 {
2653 	return 0;
2654 }
2655 
2656 static void __net_exit tcp_sk_exit(struct net *net)
2657 {
2658 }
2659 
2660 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2661 {
2662 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2663 }
2664 
2665 static struct pernet_operations __net_initdata tcp_sk_ops = {
2666        .init	   = tcp_sk_init,
2667        .exit	   = tcp_sk_exit,
2668        .exit_batch = tcp_sk_exit_batch,
2669 };
2670 
2671 void __init tcp_v4_init(void)
2672 {
2673 	inet_hashinfo_init(&tcp_hashinfo);
2674 	if (register_pernet_subsys(&tcp_sk_ops))
2675 		panic("Failed to create the TCP control socket.\n");
2676 }
2677