xref: /linux/net/ipv6/tcp_ipv6.c (revision 2a10154abcb75ad0d7b6bfea6210ac743ec60897)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66 
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69 
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72 
73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 				      struct request_sock *req);
76 
77 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78 
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 						   const struct in6_addr *addr)
87 {
88 	return NULL;
89 }
90 #endif
91 
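/* Cache the validated input route on the socket: hold a reference on the
 * skb's dst and remember the incoming ifindex and rt6 cookie so that later
 * segments on this connection can reuse it without a new route lookup.
 */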
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 	struct dst_entry *dst = skb_dst(skb);
95 
96 	if (dst) {
97 		const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 		dst_hold(dst);
100 		sk->sk_rx_dst = dst;
101 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 	}
104 }
105 
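/* Derive the initial sequence number for the connection from the
 * address and port four-tuple of the incoming segment.
 */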
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
107 {
108 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 					    ipv6_hdr(skb)->saddr.s6_addr32,
110 					    tcp_hdr(skb)->dest,
111 					    tcp_hdr(skb)->source);
112 }
113 
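/* Active open.  Validate the destination, resolve a route and source
 * address, hash the socket into the established table and send the SYN.
 * Connections to v4-mapped addresses are handed to tcp_v4_connect() with
 * the af_ops switched to the mapped variants.
 */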
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 			  int addr_len)
116 {
117 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 	struct inet_sock *inet = inet_sk(sk);
119 	struct inet_connection_sock *icsk = inet_csk(sk);
120 	struct ipv6_pinfo *np = inet6_sk(sk);
121 	struct tcp_sock *tp = tcp_sk(sk);
122 	struct in6_addr *saddr = NULL, *final_p, final;
123 	struct rt6_info *rt;
124 	struct flowi6 fl6;
125 	struct dst_entry *dst;
126 	int addr_type;
127 	int err;
128 
129 	if (addr_len < SIN6_LEN_RFC2133)
130 		return -EINVAL;
131 
132 	if (usin->sin6_family != AF_INET6)
133 		return -EAFNOSUPPORT;
134 
135 	memset(&fl6, 0, sizeof(fl6));
136 
137 	if (np->sndflow) {
138 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 		IP6_ECN_flow_init(fl6.flowlabel);
140 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
141 			struct ip6_flowlabel *flowlabel;
142 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
143 			if (!flowlabel)
144 				return -EINVAL;
145 			fl6_sock_release(flowlabel);
146 		}
147 	}
148 
149 	/*
150 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
151 	 */
152 
153 	if (ipv6_addr_any(&usin->sin6_addr))
154 		usin->sin6_addr.s6_addr[15] = 0x1;
155 
156 	addr_type = ipv6_addr_type(&usin->sin6_addr);
157 
158 	if (addr_type & IPV6_ADDR_MULTICAST)
159 		return -ENETUNREACH;
160 
161 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
162 		if (addr_len >= sizeof(struct sockaddr_in6) &&
163 		    usin->sin6_scope_id) {
164 			/* If interface is set while binding, indices
165 			 * must coincide.
166 			 */
167 			if (sk->sk_bound_dev_if &&
168 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
169 				return -EINVAL;
170 
171 			sk->sk_bound_dev_if = usin->sin6_scope_id;
172 		}
173 
174 		/* Connecting to a link-local address requires an interface */
175 		if (!sk->sk_bound_dev_if)
176 			return -EINVAL;
177 	}
178 
179 	if (tp->rx_opt.ts_recent_stamp &&
180 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
181 		tp->rx_opt.ts_recent = 0;
182 		tp->rx_opt.ts_recent_stamp = 0;
183 		tp->write_seq = 0;
184 	}
185 
186 	sk->sk_v6_daddr = usin->sin6_addr;
187 	np->flow_label = fl6.flowlabel;
188 
189 	/*
190 	 *	TCP over IPv4
191 	 */
192 
193 	if (addr_type == IPV6_ADDR_MAPPED) {
194 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
195 		struct sockaddr_in sin;
196 
197 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
198 
199 		if (__ipv6_only_sock(sk))
200 			return -ENETUNREACH;
201 
202 		sin.sin_family = AF_INET;
203 		sin.sin_port = usin->sin6_port;
204 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
205 
206 		icsk->icsk_af_ops = &ipv6_mapped;
207 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
208 #ifdef CONFIG_TCP_MD5SIG
209 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
210 #endif
211 
212 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
213 
214 		if (err) {
215 			icsk->icsk_ext_hdr_len = exthdrlen;
216 			icsk->icsk_af_ops = &ipv6_specific;
217 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 			tp->af_specific = &tcp_sock_ipv6_specific;
220 #endif
221 			goto failure;
222 		}
223 		np->saddr = sk->sk_v6_rcv_saddr;
224 
225 		return err;
226 	}
227 
228 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
229 		saddr = &sk->sk_v6_rcv_saddr;
230 
231 	fl6.flowi6_proto = IPPROTO_TCP;
232 	fl6.daddr = sk->sk_v6_daddr;
233 	fl6.saddr = saddr ? *saddr : np->saddr;
234 	fl6.flowi6_oif = sk->sk_bound_dev_if;
235 	fl6.flowi6_mark = sk->sk_mark;
236 	fl6.fl6_dport = usin->sin6_port;
237 	fl6.fl6_sport = inet->inet_sport;
238 
239 	final_p = fl6_update_dst(&fl6, np->opt, &final);
240 
241 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
242 
243 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
244 	if (IS_ERR(dst)) {
245 		err = PTR_ERR(dst);
246 		goto failure;
247 	}
248 
249 	if (!saddr) {
250 		saddr = &fl6.saddr;
251 		sk->sk_v6_rcv_saddr = *saddr;
252 	}
253 
254 	/* set the source address */
255 	np->saddr = *saddr;
256 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
257 
258 	sk->sk_gso_type = SKB_GSO_TCPV6;
259 	__ip6_dst_store(sk, dst, NULL, NULL);
260 
261 	rt = (struct rt6_info *) dst;
262 	if (tcp_death_row.sysctl_tw_recycle &&
263 	    !tp->rx_opt.ts_recent_stamp &&
264 	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
265 		tcp_fetch_timewait_stamp(sk, dst);
266 
267 	icsk->icsk_ext_hdr_len = 0;
268 	if (np->opt)
269 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
270 					  np->opt->opt_nflen);
271 
272 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
273 
274 	inet->inet_dport = usin->sin6_port;
275 
276 	tcp_set_state(sk, TCP_SYN_SENT);
277 	err = inet6_hash_connect(&tcp_death_row, sk);
278 	if (err)
279 		goto late_failure;
280 
281 	ip6_set_txhash(sk);
282 
283 	if (!tp->write_seq && likely(!tp->repair))
284 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
285 							     sk->sk_v6_daddr.s6_addr32,
286 							     inet->inet_sport,
287 							     inet->inet_dport);
288 
289 	err = tcp_connect(sk);
290 	if (err)
291 		goto late_failure;
292 
293 	return 0;
294 
295 late_failure:
296 	tcp_set_state(sk, TCP_CLOSE);
297 	__sk_dst_reset(sk);
298 failure:
299 	inet->inet_dport = 0;
300 	sk->sk_route_caps = 0;
301 	return err;
302 }
303 
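/* Handle a deferred ICMPV6_PKT_TOOBIG notification: update the cached
 * route with the MTU stored in tp->mtu_info and, if our MSS is now too
 * large, resync it and do a simple retransmit.
 */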
304 static void tcp_v6_mtu_reduced(struct sock *sk)
305 {
306 	struct dst_entry *dst;
307 
308 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
309 		return;
310 
311 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
312 	if (!dst)
313 		return;
314 
315 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
316 		tcp_sync_mss(sk, dst_mtu(dst));
317 		tcp_simple_retransmit(sk);
318 	}
319 }
320 
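/* ICMPv6 error handler.  Look up the socket the erroring segment belongs
 * to and act on the type: redirects refresh the cached route,
 * ICMPV6_PKT_TOOBIG triggers MTU reduction, and remaining errors are
 * reported to the socket (or via sk_err_soft if it is owned by the user).
 */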
321 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
322 		u8 type, u8 code, int offset, __be32 info)
323 {
324 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
325 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
326 	struct net *net = dev_net(skb->dev);
327 	struct request_sock *fastopen;
328 	struct ipv6_pinfo *np;
329 	struct tcp_sock *tp;
330 	__u32 seq, snd_una;
331 	struct sock *sk;
332 	int err;
333 
334 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
335 					&hdr->daddr, th->dest,
336 					&hdr->saddr, ntohs(th->source),
337 					skb->dev->ifindex);
338 
339 	if (!sk) {
340 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
341 				   ICMP6_MIB_INERRORS);
342 		return;
343 	}
344 
345 	if (sk->sk_state == TCP_TIME_WAIT) {
346 		inet_twsk_put(inet_twsk(sk));
347 		return;
348 	}
349 	seq = ntohl(th->seq);
350 	if (sk->sk_state == TCP_NEW_SYN_RECV)
351 		return tcp_req_err(sk, seq);
352 
353 	bh_lock_sock(sk);
354 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
356 
357 	if (sk->sk_state == TCP_CLOSE)
358 		goto out;
359 
360 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
362 		goto out;
363 	}
364 
365 	tp = tcp_sk(sk);
366 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
367 	fastopen = tp->fastopen_rsk;
368 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 	if (sk->sk_state != TCP_LISTEN &&
370 	    !between(seq, snd_una, tp->snd_nxt)) {
371 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
372 		goto out;
373 	}
374 
375 	np = inet6_sk(sk);
376 
377 	if (type == NDISC_REDIRECT) {
378 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379 
380 		if (dst)
381 			dst->ops->redirect(dst, sk, skb);
382 		goto out;
383 	}
384 
385 	if (type == ICMPV6_PKT_TOOBIG) {
386 		/* We are not interested in TCP_LISTEN and open_requests
387 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
388 		 * they should go through unfragmented).
389 		 */
390 		if (sk->sk_state == TCP_LISTEN)
391 			goto out;
392 
393 		if (!ip6_sk_accept_pmtu(sk))
394 			goto out;
395 
396 		tp->mtu_info = ntohl(info);
397 		if (!sock_owned_by_user(sk))
398 			tcp_v6_mtu_reduced(sk);
399 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 					   &tp->tsq_flags))
401 			sock_hold(sk);
402 		goto out;
403 	}
404 
405 	icmpv6_err_convert(type, code, &err);
406 
407 	/* Might be for a request_sock */
408 	switch (sk->sk_state) {
409 	case TCP_SYN_SENT:
410 	case TCP_SYN_RECV:
411 		/* Only in fast or simultaneous open. If a fast open socket is
412 		 * already accepted it is treated as a connected one below.
413 		 */
414 		if (fastopen && !fastopen->sk)
415 			break;
416 
417 		if (!sock_owned_by_user(sk)) {
418 			sk->sk_err = err;
419 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
420 
421 			tcp_done(sk);
422 		} else
423 			sk->sk_err_soft = err;
424 		goto out;
425 	}
426 
427 	if (!sock_owned_by_user(sk) && np->recverr) {
428 		sk->sk_err = err;
429 		sk->sk_error_report(sk);
430 	} else
431 		sk->sk_err_soft = err;
432 
433 out:
434 	bh_unlock_sock(sk);
435 	sock_put(sk);
436 }
437 
438 
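/* Build a SYN-ACK for the given request sock and transmit it, routing it
 * through the listener's flow when no destination entry was supplied.
 */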
439 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
440 			      struct flowi *fl,
441 			      struct request_sock *req,
442 			      u16 queue_mapping,
443 			      struct tcp_fastopen_cookie *foc)
444 {
445 	struct inet_request_sock *ireq = inet_rsk(req);
446 	struct ipv6_pinfo *np = inet6_sk(sk);
447 	struct flowi6 *fl6 = &fl->u.ip6;
448 	struct sk_buff *skb;
449 	int err = -ENOMEM;
450 
451 	/* First, grab a route. */
452 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
453 		goto done;
454 
455 	skb = tcp_make_synack(sk, dst, req, foc);
456 
457 	if (skb) {
458 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
459 				    &ireq->ir_v6_rmt_addr);
460 
461 		fl6->daddr = ireq->ir_v6_rmt_addr;
462 		if (np->repflow && ireq->pktopts)
463 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
464 
465 		skb_set_queue_mapping(skb, queue_mapping);
466 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
467 		err = net_xmit_eval(err);
468 	}
469 
470 done:
471 	return err;
472 }
473 
474 
475 static void tcp_v6_reqsk_destructor(struct request_sock *req)
476 {
477 	kfree_skb(inet_rsk(req)->pktopts);
478 }
479 
480 #ifdef CONFIG_TCP_MD5SIG
481 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
482 						   const struct in6_addr *addr)
483 {
484 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
485 }
486 
487 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
488 						const struct sock *addr_sk)
489 {
490 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
491 }
492 
493 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
494 				 int optlen)
495 {
496 	struct tcp_md5sig cmd;
497 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
498 
499 	if (optlen < sizeof(cmd))
500 		return -EINVAL;
501 
502 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
503 		return -EFAULT;
504 
505 	if (sin6->sin6_family != AF_INET6)
506 		return -EINVAL;
507 
508 	if (!cmd.tcpm_keylen) {
509 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
510 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
511 					      AF_INET);
512 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
513 				      AF_INET6);
514 	}
515 
516 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
517 		return -EINVAL;
518 
519 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
520 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
521 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
522 
523 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
524 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
525 }
526 
527 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
528 					const struct in6_addr *daddr,
529 					const struct in6_addr *saddr, int nbytes)
530 {
531 	struct tcp6_pseudohdr *bp;
532 	struct scatterlist sg;
533 
534 	bp = &hp->md5_blk.ip6;
535 	/* 1. TCP pseudo-header (RFC2460) */
536 	bp->saddr = *saddr;
537 	bp->daddr = *daddr;
538 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
539 	bp->len = cpu_to_be32(nbytes);
540 
541 	sg_init_one(&sg, bp, sizeof(*bp));
542 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
543 }
544 
545 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
546 			       const struct in6_addr *daddr, struct in6_addr *saddr,
547 			       const struct tcphdr *th)
548 {
549 	struct tcp_md5sig_pool *hp;
550 	struct hash_desc *desc;
551 
552 	hp = tcp_get_md5sig_pool();
553 	if (!hp)
554 		goto clear_hash_noput;
555 	desc = &hp->md5_desc;
556 
557 	if (crypto_hash_init(desc))
558 		goto clear_hash;
559 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
560 		goto clear_hash;
561 	if (tcp_md5_hash_header(hp, th))
562 		goto clear_hash;
563 	if (tcp_md5_hash_key(hp, key))
564 		goto clear_hash;
565 	if (crypto_hash_final(desc, md5_hash))
566 		goto clear_hash;
567 
568 	tcp_put_md5sig_pool();
569 	return 0;
570 
571 clear_hash:
572 	tcp_put_md5sig_pool();
573 clear_hash_noput:
574 	memset(md5_hash, 0, 16);
575 	return 1;
576 }
577 
578 static int tcp_v6_md5_hash_skb(char *md5_hash,
579 			       const struct tcp_md5sig_key *key,
580 			       const struct sock *sk,
581 			       const struct sk_buff *skb)
582 {
583 	const struct in6_addr *saddr, *daddr;
584 	struct tcp_md5sig_pool *hp;
585 	struct hash_desc *desc;
586 	const struct tcphdr *th = tcp_hdr(skb);
587 
588 	if (sk) { /* valid for establish/request sockets */
589 		saddr = &sk->sk_v6_rcv_saddr;
590 		daddr = &sk->sk_v6_daddr;
591 	} else {
592 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
593 		saddr = &ip6h->saddr;
594 		daddr = &ip6h->daddr;
595 	}
596 
597 	hp = tcp_get_md5sig_pool();
598 	if (!hp)
599 		goto clear_hash_noput;
600 	desc = &hp->md5_desc;
601 
602 	if (crypto_hash_init(desc))
603 		goto clear_hash;
604 
605 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
606 		goto clear_hash;
607 	if (tcp_md5_hash_header(hp, th))
608 		goto clear_hash;
609 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
610 		goto clear_hash;
611 	if (tcp_md5_hash_key(hp, key))
612 		goto clear_hash;
613 	if (crypto_hash_final(desc, md5_hash))
614 		goto clear_hash;
615 
616 	tcp_put_md5sig_pool();
617 	return 0;
618 
619 clear_hash:
620 	tcp_put_md5sig_pool();
621 clear_hash_noput:
622 	memset(md5_hash, 0, 16);
623 	return 1;
624 }
625 
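/* Check the TCP MD5 signature of an incoming segment against the key
 * configured for the peer address.  Returns true if the segment must be
 * dropped: signature missing, unexpected, or not matching.
 */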
626 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
627 {
628 	const __u8 *hash_location = NULL;
629 	struct tcp_md5sig_key *hash_expected;
630 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
631 	const struct tcphdr *th = tcp_hdr(skb);
632 	int genhash;
633 	u8 newhash[16];
634 
635 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
636 	hash_location = tcp_parse_md5sig_option(th);
637 
638 	/* We've parsed the options - do we have a hash? */
639 	if (!hash_expected && !hash_location)
640 		return false;
641 
642 	if (hash_expected && !hash_location) {
643 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
644 		return true;
645 	}
646 
647 	if (!hash_expected && hash_location) {
648 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
649 		return true;
650 	}
651 
652 	/* check the signature */
653 	genhash = tcp_v6_md5_hash_skb(newhash,
654 				      hash_expected,
655 				      NULL, skb);
656 
657 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
658 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
659 				     genhash ? "failed" : "mismatch",
660 				     &ip6h->saddr, ntohs(th->source),
661 				     &ip6h->daddr, ntohs(th->dest));
662 		return true;
663 	}
664 	return false;
665 }
666 #endif
667 
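/* Fill in the IPv6-specific fields of a new request sock: the remote and
 * local addresses, the interface for link-local peers, and a reference to
 * the SYN skb when the listener wants packet options.
 */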
668 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
669 			    struct sk_buff *skb)
670 {
671 	struct inet_request_sock *ireq = inet_rsk(req);
672 	struct ipv6_pinfo *np = inet6_sk(sk);
673 
674 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
675 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
676 
677 	/* So that link locals have meaning */
678 	if (!sk->sk_bound_dev_if &&
679 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
680 		ireq->ir_iif = tcp_v6_iif(skb);
681 
682 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
683 	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
684 	     np->rxopt.bits.rxinfo ||
685 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
686 	     np->rxopt.bits.rxohlim || np->repflow)) {
687 		atomic_inc(&skb->users);
688 		ireq->pktopts = skb;
689 	}
690 }
691 
692 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
693 					  const struct request_sock *req,
694 					  bool *strict)
695 {
696 	if (strict)
697 		*strict = true;
698 	return inet6_csk_route_req(sk, &fl->u.ip6, req);
699 }
700 
701 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
702 	.family		=	AF_INET6,
703 	.obj_size	=	sizeof(struct tcp6_request_sock),
704 	.rtx_syn_ack	=	tcp_rtx_synack,
705 	.send_ack	=	tcp_v6_reqsk_send_ack,
706 	.destructor	=	tcp_v6_reqsk_destructor,
707 	.send_reset	=	tcp_v6_send_reset,
708 	.syn_ack_timeout =	tcp_syn_ack_timeout,
709 };
710 
711 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
712 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
713 				sizeof(struct ipv6hdr),
714 #ifdef CONFIG_TCP_MD5SIG
715 	.req_md5_lookup	=	tcp_v6_md5_lookup,
716 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
717 #endif
718 	.init_req	=	tcp_v6_init_req,
719 #ifdef CONFIG_SYN_COOKIES
720 	.cookie_init_seq =	cookie_v6_init_sequence,
721 #endif
722 	.route_req	=	tcp_v6_route_req,
723 	.init_seq	=	tcp_v6_init_sequence,
724 	.send_synack	=	tcp_v6_send_synack,
725 	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
726 };
727 
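/* Build and send a bare ACK or RST from the per-netns control socket,
 * mirroring the addresses and ports of the segment being answered and
 * optionally carrying timestamp and MD5 options.
 */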
728 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
729 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
730 				 int oif, struct tcp_md5sig_key *key, int rst,
731 				 u8 tclass, u32 label)
732 {
733 	const struct tcphdr *th = tcp_hdr(skb);
734 	struct tcphdr *t1;
735 	struct sk_buff *buff;
736 	struct flowi6 fl6;
737 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
738 	struct sock *ctl_sk = net->ipv6.tcp_sk;
739 	unsigned int tot_len = sizeof(struct tcphdr);
740 	struct dst_entry *dst;
741 	__be32 *topt;
742 
743 	if (tsecr)
744 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
745 #ifdef CONFIG_TCP_MD5SIG
746 	if (key)
747 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
748 #endif
749 
750 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
751 			 GFP_ATOMIC);
752 	if (!buff)
753 		return;
754 
755 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
756 
757 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
758 	skb_reset_transport_header(buff);
759 
760 	/* Swap the send and the receive. */
761 	memset(t1, 0, sizeof(*t1));
762 	t1->dest = th->source;
763 	t1->source = th->dest;
764 	t1->doff = tot_len / 4;
765 	t1->seq = htonl(seq);
766 	t1->ack_seq = htonl(ack);
767 	t1->ack = !rst || !th->ack;
768 	t1->rst = rst;
769 	t1->window = htons(win);
770 
771 	topt = (__be32 *)(t1 + 1);
772 
773 	if (tsecr) {
774 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
775 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
776 		*topt++ = htonl(tsval);
777 		*topt++ = htonl(tsecr);
778 	}
779 
780 #ifdef CONFIG_TCP_MD5SIG
781 	if (key) {
782 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
783 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
784 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
785 				    &ipv6_hdr(skb)->saddr,
786 				    &ipv6_hdr(skb)->daddr, t1);
787 	}
788 #endif
789 
790 	memset(&fl6, 0, sizeof(fl6));
791 	fl6.daddr = ipv6_hdr(skb)->saddr;
792 	fl6.saddr = ipv6_hdr(skb)->daddr;
793 	fl6.flowlabel = label;
794 
795 	buff->ip_summed = CHECKSUM_PARTIAL;
796 	buff->csum = 0;
797 
798 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
799 
800 	fl6.flowi6_proto = IPPROTO_TCP;
801 	if (rt6_need_strict(&fl6.daddr) && !oif)
802 		fl6.flowi6_oif = tcp_v6_iif(skb);
803 	else
804 		fl6.flowi6_oif = oif;
805 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
806 	fl6.fl6_dport = t1->dest;
807 	fl6.fl6_sport = t1->source;
808 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
809 
810 	/* Pass a socket to ip6_dst_lookup even when the packet is a RST;
811 	 * the underlying function will use it to retrieve the network
812 	 * namespace.
813 	 */
814 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
815 	if (!IS_ERR(dst)) {
816 		skb_dst_set(buff, dst);
817 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
818 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
819 		if (rst)
820 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
821 		return;
822 	}
823 
824 	kfree_skb(buff);
825 }
826 
827 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
828 {
829 	const struct tcphdr *th = tcp_hdr(skb);
830 	u32 seq = 0, ack_seq = 0;
831 	struct tcp_md5sig_key *key = NULL;
832 #ifdef CONFIG_TCP_MD5SIG
833 	const __u8 *hash_location = NULL;
834 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
835 	unsigned char newhash[16];
836 	int genhash;
837 	struct sock *sk1 = NULL;
838 #endif
839 	int oif;
840 
841 	if (th->rst)
842 		return;
843 
844 	/* If sk is not NULL, it means we did a successful lookup and the
845 	 * incoming route had to be correct. prequeue might have dropped our dst.
846 	 */
847 	if (!sk && !ipv6_unicast_destination(skb))
848 		return;
849 
850 #ifdef CONFIG_TCP_MD5SIG
851 	hash_location = tcp_parse_md5sig_option(th);
852 	if (!sk && hash_location) {
853 		/*
854 		 * The active side is lost. Try to find the listening socket
855 		 * through the source port, and then find the md5 key through
856 		 * the listening socket. We do not weaken security here:
857 		 * the incoming packet is checked with the md5 hash of the found
858 		 * key, and no RST is generated if the md5 hash doesn't match.
859 		 */
860 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
861 					   &tcp_hashinfo, &ipv6h->saddr,
862 					   th->source, &ipv6h->daddr,
863 					   ntohs(th->source), tcp_v6_iif(skb));
864 		if (!sk1)
865 			return;
866 
867 		rcu_read_lock();
868 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
869 		if (!key)
870 			goto release_sk1;
871 
872 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
873 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
874 			goto release_sk1;
875 	} else {
876 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
877 	}
878 #endif
879 
880 	if (th->ack)
881 		seq = ntohl(th->ack_seq);
882 	else
883 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
884 			  (th->doff << 2);
885 
886 	oif = sk ? sk->sk_bound_dev_if : 0;
887 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
888 
889 #ifdef CONFIG_TCP_MD5SIG
890 release_sk1:
891 	if (sk1) {
892 		rcu_read_unlock();
893 		sock_put(sk1);
894 	}
895 #endif
896 }
897 
898 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
899 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
900 			    struct tcp_md5sig_key *key, u8 tclass,
901 			    u32 label)
902 {
903 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
904 			     tclass, label);
905 }
906 
907 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
908 {
909 	struct inet_timewait_sock *tw = inet_twsk(sk);
910 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
911 
912 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
913 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
914 			tcp_time_stamp + tcptw->tw_ts_offset,
915 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
916 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
917 
918 	inet_twsk_put(tw);
919 }
920 
921 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
922 				  struct request_sock *req)
923 {
924 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
925 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
926 	 */
927 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
928 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
929 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
930 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
931 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
932 			0, 0);
933 }
934 
935 
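/* On a listening socket, match the segment against pending connection
 * requests and already established children; fall back to SYN cookie
 * validation if neither exists.
 */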
936 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
937 {
938 	const struct tcphdr *th = tcp_hdr(skb);
939 	struct request_sock *req;
940 	struct sock *nsk;
941 
942 	/* Find possible connection requests. */
943 	req = inet6_csk_search_req(sk, th->source,
944 				   &ipv6_hdr(skb)->saddr,
945 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
946 	if (req) {
947 		nsk = tcp_check_req(sk, skb, req, false);
948 		if (!nsk)
949 			reqsk_put(req);
950 		return nsk;
951 	}
952 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
953 					 &ipv6_hdr(skb)->saddr, th->source,
954 					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
955 					 tcp_v6_iif(skb));
956 
957 	if (nsk) {
958 		if (nsk->sk_state != TCP_TIME_WAIT) {
959 			bh_lock_sock(nsk);
960 			return nsk;
961 		}
962 		inet_twsk_put(inet_twsk(nsk));
963 		return NULL;
964 	}
965 
966 #ifdef CONFIG_SYN_COOKIES
967 	if (!th->syn)
968 		sk = cookie_v6_check(sk, skb);
969 #endif
970 	return sk;
971 }
972 
973 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
974 {
975 	if (skb->protocol == htons(ETH_P_IP))
976 		return tcp_v4_conn_request(sk, skb);
977 
978 	if (!ipv6_unicast_destination(skb))
979 		goto drop;
980 
981 	return tcp_conn_request(&tcp6_request_sock_ops,
982 				&tcp_request_sock_ipv6_ops, sk, skb);
983 
984 drop:
985 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
986 	return 0; /* don't send reset */
987 }
988 
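/* Create the child socket once the handshake completes.  IPv4 packets on
 * a v6 listener take the v4-mapped path; native IPv6 children inherit
 * addresses, options and (optionally) the MD5 key from the request and the
 * listener before being hashed into the established table.
 */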
989 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
990 					 struct request_sock *req,
991 					 struct dst_entry *dst)
992 {
993 	struct inet_request_sock *ireq;
994 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
995 	struct tcp6_sock *newtcp6sk;
996 	struct inet_sock *newinet;
997 	struct tcp_sock *newtp;
998 	struct sock *newsk;
999 #ifdef CONFIG_TCP_MD5SIG
1000 	struct tcp_md5sig_key *key;
1001 #endif
1002 	struct flowi6 fl6;
1003 
1004 	if (skb->protocol == htons(ETH_P_IP)) {
1005 		/*
1006 		 *	v6 mapped
1007 		 */
1008 
1009 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1010 
1011 		if (!newsk)
1012 			return NULL;
1013 
1014 		newtcp6sk = (struct tcp6_sock *)newsk;
1015 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1016 
1017 		newinet = inet_sk(newsk);
1018 		newnp = inet6_sk(newsk);
1019 		newtp = tcp_sk(newsk);
1020 
1021 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1022 
1023 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1024 
1025 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1026 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1027 #ifdef CONFIG_TCP_MD5SIG
1028 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1029 #endif
1030 
1031 		newnp->ipv6_ac_list = NULL;
1032 		newnp->ipv6_fl_list = NULL;
1033 		newnp->pktoptions  = NULL;
1034 		newnp->opt	   = NULL;
1035 		newnp->mcast_oif   = tcp_v6_iif(skb);
1036 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1037 		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1038 		if (np->repflow)
1039 			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1040 
1041 		/*
1042 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1043 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1044 		 * that function for the gory details. -acme
1045 		 */
1046 
1047 		/* This is a tricky place. Until this moment the IPv4 tcp code
1048 		   worked with the IPv6 icsk.icsk_af_ops.
1049 		   Sync it now.
1050 		 */
1051 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1052 
1053 		return newsk;
1054 	}
1055 
1056 	ireq = inet_rsk(req);
1057 
1058 	if (sk_acceptq_is_full(sk))
1059 		goto out_overflow;
1060 
1061 	if (!dst) {
1062 		dst = inet6_csk_route_req(sk, &fl6, req);
1063 		if (!dst)
1064 			goto out;
1065 	}
1066 
1067 	newsk = tcp_create_openreq_child(sk, req, skb);
1068 	if (!newsk)
1069 		goto out_nonewsk;
1070 
1071 	/*
1072 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1073 	 * count here, tcp_create_openreq_child now does this for us, see the
1074 	 * comment in that function for the gory details. -acme
1075 	 */
1076 
1077 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1078 	__ip6_dst_store(newsk, dst, NULL, NULL);
1079 	inet6_sk_rx_dst_set(newsk, skb);
1080 
1081 	newtcp6sk = (struct tcp6_sock *)newsk;
1082 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1083 
1084 	newtp = tcp_sk(newsk);
1085 	newinet = inet_sk(newsk);
1086 	newnp = inet6_sk(newsk);
1087 
1088 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1089 
1090 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1091 	newnp->saddr = ireq->ir_v6_loc_addr;
1092 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1093 	newsk->sk_bound_dev_if = ireq->ir_iif;
1094 
1095 	ip6_set_txhash(newsk);
1096 
1097 	/* Now IPv6 options...
1098 
1099 	   First: no IPv4 options.
1100 	 */
1101 	newinet->inet_opt = NULL;
1102 	newnp->ipv6_ac_list = NULL;
1103 	newnp->ipv6_fl_list = NULL;
1104 
1105 	/* Clone RX bits */
1106 	newnp->rxopt.all = np->rxopt.all;
1107 
1108 	/* Clone pktoptions received with SYN */
1109 	newnp->pktoptions = NULL;
1110 	if (ireq->pktopts) {
1111 		newnp->pktoptions = skb_clone(ireq->pktopts,
1112 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1113 		consume_skb(ireq->pktopts);
1114 		ireq->pktopts = NULL;
1115 		if (newnp->pktoptions)
1116 			skb_set_owner_r(newnp->pktoptions, newsk);
1117 	}
1118 	newnp->opt	  = NULL;
1119 	newnp->mcast_oif  = tcp_v6_iif(skb);
1120 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1121 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1122 	if (np->repflow)
1123 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1124 
1125 	/* Clone native IPv6 options from listening socket (if any)
1126 
1127 	   Yes, keeping a reference count would be much more clever,
1128 	   but we do one more thing here: reattach optmem
1129 	   to newsk.
1130 	 */
1131 	if (np->opt)
1132 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1133 
1134 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1135 	if (newnp->opt)
1136 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1137 						     newnp->opt->opt_flen);
1138 
1139 	tcp_ca_openreq_child(newsk, dst);
1140 
1141 	tcp_sync_mss(newsk, dst_mtu(dst));
1142 	newtp->advmss = dst_metric_advmss(dst);
1143 	if (tcp_sk(sk)->rx_opt.user_mss &&
1144 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1145 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1146 
1147 	tcp_initialize_rcv_mss(newsk);
1148 
1149 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1150 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1151 
1152 #ifdef CONFIG_TCP_MD5SIG
1153 	/* Copy over the MD5 key from the original socket */
1154 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1155 	if (key) {
1156 		/* We're using one, so create a matching key
1157 		 * on the newsk structure. If we fail to get
1158 		 * memory, then we end up not copying the key
1159 		 * across. Shucks.
1160 		 */
1161 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1162 			       AF_INET6, key->key, key->keylen,
1163 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1164 	}
1165 #endif
1166 
1167 	if (__inet_inherit_port(sk, newsk) < 0) {
1168 		inet_csk_prepare_forced_close(newsk);
1169 		tcp_done(newsk);
1170 		goto out;
1171 	}
1172 	__inet_hash(newsk, NULL);
1173 
1174 	return newsk;
1175 
1176 out_overflow:
1177 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1178 out_nonewsk:
1179 	dst_release(dst);
1180 out:
1181 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1182 	return NULL;
1183 }
1184 
1185 /* The socket must have its spinlock held when we get
1186  * here.
1187  *
1188  * We have a potential double-lock case here, so even when
1189  * doing backlog processing we use the BH locking scheme.
1190  * This is because we cannot sleep with the original spinlock
1191  * held.
1192  */
1193 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1194 {
1195 	struct ipv6_pinfo *np = inet6_sk(sk);
1196 	struct tcp_sock *tp;
1197 	struct sk_buff *opt_skb = NULL;
1198 
1199 	/* Imagine: socket is IPv6. IPv4 packet arrives,
1200 	   goes to the IPv4 receive handler and is backlogged.
1201 	   From the backlog it always goes here. Kerboom...
1202 	   Fortunately, tcp_rcv_established and rcv_established
1203 	   handle them correctly, but that is not the case with
1204 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1205 	 */
1206 
1207 	if (skb->protocol == htons(ETH_P_IP))
1208 		return tcp_v4_do_rcv(sk, skb);
1209 
1210 	if (sk_filter(sk, skb))
1211 		goto discard;
1212 
1213 	/*
1214 	 *	socket locking is here for SMP purposes as backlog rcv
1215 	 *	is currently called with bh processing disabled.
1216 	 */
1217 
1218 	/* Do Stevens' IPV6_PKTOPTIONS.
1219 
1220 	   Yes, guys, this is the only place in our code where we
1221 	   can do it without affecting IPv4.
1222 	   The rest of the code is protocol independent,
1223 	   and I do not like the idea of uglifying IPv4.
1224 
1225 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1226 	   does not look very well thought out. For now we latch
1227 	   the options received in the last packet enqueued
1228 	   by tcp. Feel free to propose a better solution.
1229 					       --ANK (980728)
1230 	 */
1231 	if (np->rxopt.all)
1232 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1233 
1234 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1235 		struct dst_entry *dst = sk->sk_rx_dst;
1236 
1237 		sock_rps_save_rxhash(sk, skb);
1238 		sk_mark_napi_id(sk, skb);
1239 		if (dst) {
1240 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1241 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1242 				dst_release(dst);
1243 				sk->sk_rx_dst = NULL;
1244 			}
1245 		}
1246 
1247 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1248 		if (opt_skb)
1249 			goto ipv6_pktoptions;
1250 		return 0;
1251 	}
1252 
1253 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1254 		goto csum_err;
1255 
1256 	if (sk->sk_state == TCP_LISTEN) {
1257 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1258 		if (!nsk)
1259 			goto discard;
1260 
1261 		/*
1262 		 * Queue it on the new socket if the new socket is active,
1263 		 * otherwise we just shortcircuit this and continue with
1264 		 * the new socket.
1265 		 */
1266 		if (nsk != sk) {
1267 			sock_rps_save_rxhash(nsk, skb);
1268 			sk_mark_napi_id(sk, skb);
1269 			if (tcp_child_process(sk, nsk, skb))
1270 				goto reset;
1271 			if (opt_skb)
1272 				__kfree_skb(opt_skb);
1273 			return 0;
1274 		}
1275 	} else
1276 		sock_rps_save_rxhash(sk, skb);
1277 
1278 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1279 		goto reset;
1280 	if (opt_skb)
1281 		goto ipv6_pktoptions;
1282 	return 0;
1283 
1284 reset:
1285 	tcp_v6_send_reset(sk, skb);
1286 discard:
1287 	if (opt_skb)
1288 		__kfree_skb(opt_skb);
1289 	kfree_skb(skb);
1290 	return 0;
1291 csum_err:
1292 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1293 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1294 	goto discard;
1295 
1296 
1297 ipv6_pktoptions:
1298 	/* You may ask, what is this?  We get here only when:
1299 
1300 	   1. skb was enqueued by tcp.
1301 	   2. skb is added to tail of read queue, rather than out of order.
1302 	   3. socket is not in passive state.
1303 	   4. Finally, it really contains options which the user wants to receive.
1304 	 */
1305 	tp = tcp_sk(sk);
1306 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1307 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1308 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1309 			np->mcast_oif = tcp_v6_iif(opt_skb);
1310 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1311 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1312 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1313 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1314 		if (np->repflow)
1315 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1316 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1317 			skb_set_owner_r(opt_skb, sk);
1318 			opt_skb = xchg(&np->pktoptions, opt_skb);
1319 		} else {
1320 			__kfree_skb(opt_skb);
1321 			opt_skb = xchg(&np->pktoptions, NULL);
1322 		}
1323 	}
1324 
1325 	kfree_skb(opt_skb);
1326 	return 0;
1327 }
1328 
1329 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1330 			   const struct tcphdr *th)
1331 {
1332 	/* This is tricky: we move IP6CB at its correct location into
1333 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1334 	 * _decode_session6() uses IP6CB().
1335 	 * barrier() makes sure compiler won't play aliasing games.
1336 	 */
1337 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1338 		sizeof(struct inet6_skb_parm));
1339 	barrier();
1340 
1341 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1342 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1343 				    skb->len - th->doff*4);
1344 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1345 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1346 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1347 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1348 	TCP_SKB_CB(skb)->sacked = 0;
1349 }
1350 
1351 static void tcp_v6_restore_cb(struct sk_buff *skb)
1352 {
1353 	/* We need to move header back to the beginning if xfrm6_policy_check()
1354 	 * and tcp_v6_fill_cb() are going to be called again.
1355 	 */
1356 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1357 		sizeof(struct inet6_skb_parm));
1358 }
1359 
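/* Main IPv6 TCP receive path: validate the header and checksum, look up
 * the owning socket and either process the segment directly, queue it to
 * the prequeue/backlog, or answer unknown segments with a reset.
 */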
1360 static int tcp_v6_rcv(struct sk_buff *skb)
1361 {
1362 	const struct tcphdr *th;
1363 	const struct ipv6hdr *hdr;
1364 	struct sock *sk;
1365 	int ret;
1366 	struct net *net = dev_net(skb->dev);
1367 
1368 	if (skb->pkt_type != PACKET_HOST)
1369 		goto discard_it;
1370 
1371 	/*
1372 	 *	Count it even if it's bad.
1373 	 */
1374 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1375 
1376 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1377 		goto discard_it;
1378 
1379 	th = tcp_hdr(skb);
1380 
1381 	if (th->doff < sizeof(struct tcphdr)/4)
1382 		goto bad_packet;
1383 	if (!pskb_may_pull(skb, th->doff*4))
1384 		goto discard_it;
1385 
1386 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1387 		goto csum_error;
1388 
1389 	th = tcp_hdr(skb);
1390 	hdr = ipv6_hdr(skb);
1391 
1392 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1393 				inet6_iif(skb));
1394 	if (!sk)
1395 		goto no_tcp_socket;
1396 
1397 process:
1398 	if (sk->sk_state == TCP_TIME_WAIT)
1399 		goto do_time_wait;
1400 
1401 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1402 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1403 		goto discard_and_relse;
1404 	}
1405 
1406 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1407 		goto discard_and_relse;
1408 
1409 	tcp_v6_fill_cb(skb, hdr, th);
1410 
1411 #ifdef CONFIG_TCP_MD5SIG
1412 	if (tcp_v6_inbound_md5_hash(sk, skb))
1413 		goto discard_and_relse;
1414 #endif
1415 
1416 	if (sk_filter(sk, skb))
1417 		goto discard_and_relse;
1418 
1419 	sk_incoming_cpu_update(sk);
1420 	skb->dev = NULL;
1421 
1422 	bh_lock_sock_nested(sk);
1423 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1424 	ret = 0;
1425 	if (!sock_owned_by_user(sk)) {
1426 		if (!tcp_prequeue(sk, skb))
1427 			ret = tcp_v6_do_rcv(sk, skb);
1428 	} else if (unlikely(sk_add_backlog(sk, skb,
1429 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1430 		bh_unlock_sock(sk);
1431 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1432 		goto discard_and_relse;
1433 	}
1434 	bh_unlock_sock(sk);
1435 
1436 	sock_put(sk);
1437 	return ret ? -1 : 0;
1438 
1439 no_tcp_socket:
1440 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1441 		goto discard_it;
1442 
1443 	tcp_v6_fill_cb(skb, hdr, th);
1444 
1445 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1446 csum_error:
1447 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1448 bad_packet:
1449 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1450 	} else {
1451 		tcp_v6_send_reset(NULL, skb);
1452 	}
1453 
1454 discard_it:
1455 	kfree_skb(skb);
1456 	return 0;
1457 
1458 discard_and_relse:
1459 	sock_put(sk);
1460 	goto discard_it;
1461 
1462 do_time_wait:
1463 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1464 		inet_twsk_put(inet_twsk(sk));
1465 		goto discard_it;
1466 	}
1467 
1468 	tcp_v6_fill_cb(skb, hdr, th);
1469 
1470 	if (skb->len < (th->doff<<2)) {
1471 		inet_twsk_put(inet_twsk(sk));
1472 		goto bad_packet;
1473 	}
1474 	if (tcp_checksum_complete(skb)) {
1475 		inet_twsk_put(inet_twsk(sk));
1476 		goto csum_error;
1477 	}
1478 
1479 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1480 	case TCP_TW_SYN:
1481 	{
1482 		struct sock *sk2;
1483 
1484 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1485 					    &ipv6_hdr(skb)->saddr, th->source,
1486 					    &ipv6_hdr(skb)->daddr,
1487 					    ntohs(th->dest), tcp_v6_iif(skb));
1488 		if (sk2) {
1489 			struct inet_timewait_sock *tw = inet_twsk(sk);
1490 			inet_twsk_deschedule(tw);
1491 			inet_twsk_put(tw);
1492 			sk = sk2;
1493 			tcp_v6_restore_cb(skb);
1494 			goto process;
1495 		}
1496 		/* Fall through to ACK */
1497 	}
1498 	case TCP_TW_ACK:
1499 		tcp_v6_timewait_ack(sk, skb);
1500 		break;
1501 	case TCP_TW_RST:
1502 		tcp_v6_restore_cb(skb);
1503 		goto no_tcp_socket;
1504 	case TCP_TW_SUCCESS:
1505 		;
1506 	}
1507 	goto discard_it;
1508 }
1509 
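/* Early demux: before routing, try to match the packet to an established
 * socket so its cached rx dst can be reused and a full route lookup
 * avoided.
 */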
1510 static void tcp_v6_early_demux(struct sk_buff *skb)
1511 {
1512 	const struct ipv6hdr *hdr;
1513 	const struct tcphdr *th;
1514 	struct sock *sk;
1515 
1516 	if (skb->pkt_type != PACKET_HOST)
1517 		return;
1518 
1519 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1520 		return;
1521 
1522 	hdr = ipv6_hdr(skb);
1523 	th = tcp_hdr(skb);
1524 
1525 	if (th->doff < sizeof(struct tcphdr) / 4)
1526 		return;
1527 
1528 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1529 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1530 					&hdr->saddr, th->source,
1531 					&hdr->daddr, ntohs(th->dest),
1532 					inet6_iif(skb));
1533 	if (sk) {
1534 		skb->sk = sk;
1535 		skb->destructor = sock_edemux;
1536 		if (sk_fullsock(sk)) {
1537 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1538 
1539 			if (dst)
1540 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1541 			if (dst &&
1542 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1543 				skb_dst_set_noref(skb, dst);
1544 		}
1545 	}
1546 }
1547 
1548 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1549 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1550 	.twsk_unique	= tcp_twsk_unique,
1551 	.twsk_destructor = tcp_twsk_destructor,
1552 };
1553 
1554 static const struct inet_connection_sock_af_ops ipv6_specific = {
1555 	.queue_xmit	   = inet6_csk_xmit,
1556 	.send_check	   = tcp_v6_send_check,
1557 	.rebuild_header	   = inet6_sk_rebuild_header,
1558 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1559 	.conn_request	   = tcp_v6_conn_request,
1560 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1561 	.net_header_len	   = sizeof(struct ipv6hdr),
1562 	.net_frag_header_len = sizeof(struct frag_hdr),
1563 	.setsockopt	   = ipv6_setsockopt,
1564 	.getsockopt	   = ipv6_getsockopt,
1565 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1566 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1567 	.bind_conflict	   = inet6_csk_bind_conflict,
1568 #ifdef CONFIG_COMPAT
1569 	.compat_setsockopt = compat_ipv6_setsockopt,
1570 	.compat_getsockopt = compat_ipv6_getsockopt,
1571 #endif
1572 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1573 };
1574 
1575 #ifdef CONFIG_TCP_MD5SIG
1576 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1577 	.md5_lookup	=	tcp_v6_md5_lookup,
1578 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1579 	.md5_parse	=	tcp_v6_parse_md5_keys,
1580 };
1581 #endif
1582 
1583 /*
1584  *	TCP over IPv4 via INET6 API
1585  */
1586 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1587 	.queue_xmit	   = ip_queue_xmit,
1588 	.send_check	   = tcp_v4_send_check,
1589 	.rebuild_header	   = inet_sk_rebuild_header,
1590 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1591 	.conn_request	   = tcp_v6_conn_request,
1592 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1593 	.net_header_len	   = sizeof(struct iphdr),
1594 	.setsockopt	   = ipv6_setsockopt,
1595 	.getsockopt	   = ipv6_getsockopt,
1596 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1597 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1598 	.bind_conflict	   = inet6_csk_bind_conflict,
1599 #ifdef CONFIG_COMPAT
1600 	.compat_setsockopt = compat_ipv6_setsockopt,
1601 	.compat_getsockopt = compat_ipv6_getsockopt,
1602 #endif
1603 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1604 };
1605 
1606 #ifdef CONFIG_TCP_MD5SIG
1607 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1608 	.md5_lookup	=	tcp_v4_md5_lookup,
1609 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1610 	.md5_parse	=	tcp_v6_parse_md5_keys,
1611 };
1612 #endif
1613 
1614 /* NOTE: A lot of things are set to zero explicitly by the call to
1615  *       sk_alloc(), so they need not be done here.
1616  */
1617 static int tcp_v6_init_sock(struct sock *sk)
1618 {
1619 	struct inet_connection_sock *icsk = inet_csk(sk);
1620 
1621 	tcp_init_sock(sk);
1622 
1623 	icsk->icsk_af_ops = &ipv6_specific;
1624 
1625 #ifdef CONFIG_TCP_MD5SIG
1626 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1627 #endif
1628 
1629 	return 0;
1630 }
1631 
1632 static void tcp_v6_destroy_sock(struct sock *sk)
1633 {
1634 	tcp_v4_destroy_sock(sk);
1635 	inet6_destroy_sock(sk);
1636 }
1637 
1638 #ifdef CONFIG_PROC_FS
1639 /* Proc filesystem TCPv6 sock list dumping. */
1640 static void get_openreq6(struct seq_file *seq,
1641 			 struct request_sock *req, int i, kuid_t uid)
1642 {
1643 	long ttd = req->rsk_timer.expires - jiffies;
1644 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1645 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1646 
1647 	if (ttd < 0)
1648 		ttd = 0;
1649 
1650 	seq_printf(seq,
1651 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1652 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1653 		   i,
1654 		   src->s6_addr32[0], src->s6_addr32[1],
1655 		   src->s6_addr32[2], src->s6_addr32[3],
1656 		   inet_rsk(req)->ir_num,
1657 		   dest->s6_addr32[0], dest->s6_addr32[1],
1658 		   dest->s6_addr32[2], dest->s6_addr32[3],
1659 		   ntohs(inet_rsk(req)->ir_rmt_port),
1660 		   TCP_SYN_RECV,
1661 		   0, 0, /* could print option size, but that is af dependent. */
1662 		   1,   /* timers active (only the expire timer) */
1663 		   jiffies_to_clock_t(ttd),
1664 		   req->num_timeout,
1665 		   from_kuid_munged(seq_user_ns(seq), uid),
1666 		   0,  /* non standard timer */
1667 		   0, /* open_requests have no inode */
1668 		   0, req);
1669 }
1670 
1671 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1672 {
1673 	const struct in6_addr *dest, *src;
1674 	__u16 destp, srcp;
1675 	int timer_active;
1676 	unsigned long timer_expires;
1677 	const struct inet_sock *inet = inet_sk(sp);
1678 	const struct tcp_sock *tp = tcp_sk(sp);
1679 	const struct inet_connection_sock *icsk = inet_csk(sp);
1680 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1681 
1682 	dest  = &sp->sk_v6_daddr;
1683 	src   = &sp->sk_v6_rcv_saddr;
1684 	destp = ntohs(inet->inet_dport);
1685 	srcp  = ntohs(inet->inet_sport);
1686 
1687 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1688 		timer_active	= 1;
1689 		timer_expires	= icsk->icsk_timeout;
1690 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1691 		timer_active	= 4;
1692 		timer_expires	= icsk->icsk_timeout;
1693 	} else if (timer_pending(&sp->sk_timer)) {
1694 		timer_active	= 2;
1695 		timer_expires	= sp->sk_timer.expires;
1696 	} else {
1697 		timer_active	= 0;
1698 		timer_expires = jiffies;
1699 	}
1700 
1701 	seq_printf(seq,
1702 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1703 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1704 		   i,
1705 		   src->s6_addr32[0], src->s6_addr32[1],
1706 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1707 		   dest->s6_addr32[0], dest->s6_addr32[1],
1708 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1709 		   sp->sk_state,
1710 		   tp->write_seq-tp->snd_una,
1711 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1712 		   timer_active,
1713 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1714 		   icsk->icsk_retransmits,
1715 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1716 		   icsk->icsk_probes_out,
1717 		   sock_i_ino(sp),
1718 		   atomic_read(&sp->sk_refcnt), sp,
1719 		   jiffies_to_clock_t(icsk->icsk_rto),
1720 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1721 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1722 		   tp->snd_cwnd,
1723 		   sp->sk_state == TCP_LISTEN ?
1724 			(fastopenq ? fastopenq->max_qlen : 0) :
1725 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1726 		   );
1727 }
1728 
1729 static void get_timewait6_sock(struct seq_file *seq,
1730 			       struct inet_timewait_sock *tw, int i)
1731 {
1732 	long delta = tw->tw_timer.expires - jiffies;
1733 	const struct in6_addr *dest, *src;
1734 	__u16 destp, srcp;
1735 
1736 	dest = &tw->tw_v6_daddr;
1737 	src  = &tw->tw_v6_rcv_saddr;
1738 	destp = ntohs(tw->tw_dport);
1739 	srcp  = ntohs(tw->tw_sport);
1740 
1741 	seq_printf(seq,
1742 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1743 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1744 		   i,
1745 		   src->s6_addr32[0], src->s6_addr32[1],
1746 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1747 		   dest->s6_addr32[0], dest->s6_addr32[1],
1748 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1749 		   tw->tw_substate, 0, 0,
1750 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1751 		   atomic_read(&tw->tw_refcnt), tw);
1752 }
1753 
1754 static int tcp6_seq_show(struct seq_file *seq, void *v)
1755 {
1756 	struct tcp_iter_state *st;
1757 	struct sock *sk = v;
1758 
1759 	if (v == SEQ_START_TOKEN) {
1760 		seq_puts(seq,
1761 			 "  sl  "
1762 			 "local_address                         "
1763 			 "remote_address                        "
1764 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1765 			 "   uid  timeout inode\n");
1766 		goto out;
1767 	}
1768 	st = seq->private;
1769 
1770 	switch (st->state) {
1771 	case TCP_SEQ_STATE_LISTENING:
1772 	case TCP_SEQ_STATE_ESTABLISHED:
1773 		if (sk->sk_state == TCP_TIME_WAIT)
1774 			get_timewait6_sock(seq, v, st->num);
1775 		else
1776 			get_tcp6_sock(seq, v, st->num);
1777 		break;
1778 	case TCP_SEQ_STATE_OPENREQ:
1779 		get_openreq6(seq, v, st->num, st->uid);
1780 		break;
1781 	}
1782 out:
1783 	return 0;
1784 }
1785 
1786 static const struct file_operations tcp6_afinfo_seq_fops = {
1787 	.owner   = THIS_MODULE,
1788 	.open    = tcp_seq_open,
1789 	.read    = seq_read,
1790 	.llseek  = seq_lseek,
1791 	.release = seq_release_net
1792 };
1793 
1794 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1795 	.name		= "tcp6",
1796 	.family		= AF_INET6,
1797 	.seq_fops	= &tcp6_afinfo_seq_fops,
1798 	.seq_ops	= {
1799 		.show		= tcp6_seq_show,
1800 	},
1801 };
1802 
1803 int __net_init tcp6_proc_init(struct net *net)
1804 {
1805 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1806 }
1807 
1808 void tcp6_proc_exit(struct net *net)
1809 {
1810 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1811 }
1812 #endif
1813 
1814 static void tcp_v6_clear_sk(struct sock *sk, int size)
1815 {
1816 	struct inet_sock *inet = inet_sk(sk);
1817 
1818 	/* we do not want to clear pinet6 field, because of RCU lookups */
1819 	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1820 
1821 	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1822 	memset(&inet->pinet6 + 1, 0, size);
1823 }
1824 
1825 struct proto tcpv6_prot = {
1826 	.name			= "TCPv6",
1827 	.owner			= THIS_MODULE,
1828 	.close			= tcp_close,
1829 	.connect		= tcp_v6_connect,
1830 	.disconnect		= tcp_disconnect,
1831 	.accept			= inet_csk_accept,
1832 	.ioctl			= tcp_ioctl,
1833 	.init			= tcp_v6_init_sock,
1834 	.destroy		= tcp_v6_destroy_sock,
1835 	.shutdown		= tcp_shutdown,
1836 	.setsockopt		= tcp_setsockopt,
1837 	.getsockopt		= tcp_getsockopt,
1838 	.recvmsg		= tcp_recvmsg,
1839 	.sendmsg		= tcp_sendmsg,
1840 	.sendpage		= tcp_sendpage,
1841 	.backlog_rcv		= tcp_v6_do_rcv,
1842 	.release_cb		= tcp_release_cb,
1843 	.hash			= inet_hash,
1844 	.unhash			= inet_unhash,
1845 	.get_port		= inet_csk_get_port,
1846 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1847 	.stream_memory_free	= tcp_stream_memory_free,
1848 	.sockets_allocated	= &tcp_sockets_allocated,
1849 	.memory_allocated	= &tcp_memory_allocated,
1850 	.memory_pressure	= &tcp_memory_pressure,
1851 	.orphan_count		= &tcp_orphan_count,
1852 	.sysctl_mem		= sysctl_tcp_mem,
1853 	.sysctl_wmem		= sysctl_tcp_wmem,
1854 	.sysctl_rmem		= sysctl_tcp_rmem,
1855 	.max_header		= MAX_TCP_HEADER,
1856 	.obj_size		= sizeof(struct tcp6_sock),
1857 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1858 	.twsk_prot		= &tcp6_timewait_sock_ops,
1859 	.rsk_prot		= &tcp6_request_sock_ops,
1860 	.h.hashinfo		= &tcp_hashinfo,
1861 	.no_autobind		= true,
1862 #ifdef CONFIG_COMPAT
1863 	.compat_setsockopt	= compat_tcp_setsockopt,
1864 	.compat_getsockopt	= compat_tcp_getsockopt,
1865 #endif
1866 #ifdef CONFIG_MEMCG_KMEM
1867 	.proto_cgroup		= tcp_proto_cgroup,
1868 #endif
1869 	.clear_sk		= tcp_v6_clear_sk,
1870 };
1871 
1872 static const struct inet6_protocol tcpv6_protocol = {
1873 	.early_demux	=	tcp_v6_early_demux,
1874 	.handler	=	tcp_v6_rcv,
1875 	.err_handler	=	tcp_v6_err,
1876 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1877 };
1878 
1879 static struct inet_protosw tcpv6_protosw = {
1880 	.type		=	SOCK_STREAM,
1881 	.protocol	=	IPPROTO_TCP,
1882 	.prot		=	&tcpv6_prot,
1883 	.ops		=	&inet6_stream_ops,
1884 	.flags		=	INET_PROTOSW_PERMANENT |
1885 				INET_PROTOSW_ICSK,
1886 };
1887 
1888 static int __net_init tcpv6_net_init(struct net *net)
1889 {
1890 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1891 				    SOCK_RAW, IPPROTO_TCP, net);
1892 }
1893 
1894 static void __net_exit tcpv6_net_exit(struct net *net)
1895 {
1896 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1897 }
1898 
1899 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1900 {
1901 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1902 }
1903 
1904 static struct pernet_operations tcpv6_net_ops = {
1905 	.init	    = tcpv6_net_init,
1906 	.exit	    = tcpv6_net_exit,
1907 	.exit_batch = tcpv6_net_exit_batch,
1908 };
1909 
1910 int __init tcpv6_init(void)
1911 {
1912 	int ret;
1913 
1914 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1915 	if (ret)
1916 		goto out;
1917 
1918 	/* register inet6 protocol */
1919 	ret = inet6_register_protosw(&tcpv6_protosw);
1920 	if (ret)
1921 		goto out_tcpv6_protocol;
1922 
1923 	ret = register_pernet_subsys(&tcpv6_net_ops);
1924 	if (ret)
1925 		goto out_tcpv6_protosw;
1926 out:
1927 	return ret;
1928 
1929 out_tcpv6_protosw:
1930 	inet6_unregister_protosw(&tcpv6_protosw);
1931 out_tcpv6_protocol:
1932 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1933 	goto out;
1934 }
1935 
1936 void tcpv6_exit(void)
1937 {
1938 	unregister_pernet_subsys(&tcpv6_net_ops);
1939 	inet6_unregister_protosw(&tcpv6_protosw);
1940 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1941 }
1942