xref: /linux/net/ipv6/tcp_ipv6.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 
67 #include <asm/uaccess.h>
68 
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71 
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74 
75 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 				      struct request_sock *req);
78 
79 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 
81 static const struct inet_connection_sock_af_ops ipv6_mapped;
82 static const struct inet_connection_sock_af_ops ipv6_specific;
83 #ifdef CONFIG_TCP_MD5SIG
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
85 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
86 #else
87 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
88 						   const struct in6_addr *addr)
89 {
90 	return NULL;
91 }
92 #endif
93 
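/* Cache the inbound route on a freshly established socket so the
 * ESTABLISHED fast path can skip a route lookup; the fib6 node's
 * fn_sernum is kept as a cookie to detect later routing changes.
 */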
94 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
95 {
96 	struct dst_entry *dst = skb_dst(skb);
97 	const struct rt6_info *rt = (const struct rt6_info *)dst;
98 
99 	dst_hold(dst);
100 	sk->sk_rx_dst = dst;
101 	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 	if (rt->rt6i_node)
103 		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104 }
105 
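/* Add the socket to the TCP hash tables.  v4-mapped sockets go through
 * the IPv4 hash routine; native IPv6 sockets are hashed with BHs
 * disabled via __inet6_hash().
 */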
106 static void tcp_v6_hash(struct sock *sk)
107 {
108 	if (sk->sk_state != TCP_CLOSE) {
109 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
110 			tcp_prot.hash(sk);
111 			return;
112 		}
113 		local_bh_disable();
114 		__inet6_hash(sk, NULL);
115 		local_bh_enable();
116 	}
117 }
118 
119 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
120 {
121 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
122 					    ipv6_hdr(skb)->saddr.s6_addr32,
123 					    tcp_hdr(skb)->dest,
124 					    tcp_hdr(skb)->source);
125 }
126 
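/* Active open.  Resolves flow labels and link-local scope, falls back
 * to tcp_v4_connect() for v4-mapped destinations, then routes the flow,
 * picks a source address, binds a local port and sends the SYN.
 */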
127 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
128 			  int addr_len)
129 {
130 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
131 	struct inet_sock *inet = inet_sk(sk);
132 	struct inet_connection_sock *icsk = inet_csk(sk);
133 	struct ipv6_pinfo *np = inet6_sk(sk);
134 	struct tcp_sock *tp = tcp_sk(sk);
135 	struct in6_addr *saddr = NULL, *final_p, final;
136 	struct rt6_info *rt;
137 	struct flowi6 fl6;
138 	struct dst_entry *dst;
139 	int addr_type;
140 	int err;
141 
142 	if (addr_len < SIN6_LEN_RFC2133)
143 		return -EINVAL;
144 
145 	if (usin->sin6_family != AF_INET6)
146 		return -EAFNOSUPPORT;
147 
148 	memset(&fl6, 0, sizeof(fl6));
149 
150 	if (np->sndflow) {
151 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
152 		IP6_ECN_flow_init(fl6.flowlabel);
153 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
154 			struct ip6_flowlabel *flowlabel;
155 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
156 			if (flowlabel == NULL)
157 				return -EINVAL;
158 			usin->sin6_addr = flowlabel->dst;
159 			fl6_sock_release(flowlabel);
160 		}
161 	}
162 
163 	/*
164 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
165 	 */
166 
167 	if (ipv6_addr_any(&usin->sin6_addr))
168 		usin->sin6_addr.s6_addr[15] = 0x1;
169 
170 	addr_type = ipv6_addr_type(&usin->sin6_addr);
171 
172 	if (addr_type & IPV6_ADDR_MULTICAST)
173 		return -ENETUNREACH;
174 
175 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
176 		if (addr_len >= sizeof(struct sockaddr_in6) &&
177 		    usin->sin6_scope_id) {
178 			/* If an interface was set while binding, the
179 			 * indices must coincide.
180 			 */
181 			if (sk->sk_bound_dev_if &&
182 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
183 				return -EINVAL;
184 
185 			sk->sk_bound_dev_if = usin->sin6_scope_id;
186 		}
187 
188 		/* Connecting to a link-local address requires an interface. */
189 		if (!sk->sk_bound_dev_if)
190 			return -EINVAL;
191 	}
192 
193 	if (tp->rx_opt.ts_recent_stamp &&
194 	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
195 		tp->rx_opt.ts_recent = 0;
196 		tp->rx_opt.ts_recent_stamp = 0;
197 		tp->write_seq = 0;
198 	}
199 
200 	np->daddr = usin->sin6_addr;
201 	np->flow_label = fl6.flowlabel;
202 
203 	/*
204 	 *	TCP over IPv4
205 	 */
206 
207 	if (addr_type == IPV6_ADDR_MAPPED) {
208 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
209 		struct sockaddr_in sin;
210 
211 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
212 
213 		if (__ipv6_only_sock(sk))
214 			return -ENETUNREACH;
215 
216 		sin.sin_family = AF_INET;
217 		sin.sin_port = usin->sin6_port;
218 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
219 
220 		icsk->icsk_af_ops = &ipv6_mapped;
221 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
222 #ifdef CONFIG_TCP_MD5SIG
223 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
224 #endif
225 
226 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
227 
228 		if (err) {
229 			icsk->icsk_ext_hdr_len = exthdrlen;
230 			icsk->icsk_af_ops = &ipv6_specific;
231 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
232 #ifdef CONFIG_TCP_MD5SIG
233 			tp->af_specific = &tcp_sock_ipv6_specific;
234 #endif
235 			goto failure;
236 		} else {
237 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
238 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
239 					       &np->rcv_saddr);
240 		}
241 
242 		return err;
243 	}
244 
245 	if (!ipv6_addr_any(&np->rcv_saddr))
246 		saddr = &np->rcv_saddr;
247 
248 	fl6.flowi6_proto = IPPROTO_TCP;
249 	fl6.daddr = np->daddr;
250 	fl6.saddr = saddr ? *saddr : np->saddr;
251 	fl6.flowi6_oif = sk->sk_bound_dev_if;
252 	fl6.flowi6_mark = sk->sk_mark;
253 	fl6.fl6_dport = usin->sin6_port;
254 	fl6.fl6_sport = inet->inet_sport;
255 
256 	final_p = fl6_update_dst(&fl6, np->opt, &final);
257 
258 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259 
260 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
261 	if (IS_ERR(dst)) {
262 		err = PTR_ERR(dst);
263 		goto failure;
264 	}
265 
266 	if (saddr == NULL) {
267 		saddr = &fl6.saddr;
268 		np->rcv_saddr = *saddr;
269 	}
270 
271 	/* set the source address */
272 	np->saddr = *saddr;
273 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
274 
275 	sk->sk_gso_type = SKB_GSO_TCPV6;
276 	__ip6_dst_store(sk, dst, NULL, NULL);
277 
278 	rt = (struct rt6_info *) dst;
279 	if (tcp_death_row.sysctl_tw_recycle &&
280 	    !tp->rx_opt.ts_recent_stamp &&
281 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
282 		tcp_fetch_timewait_stamp(sk, dst);
283 
284 	icsk->icsk_ext_hdr_len = 0;
285 	if (np->opt)
286 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
287 					  np->opt->opt_nflen);
288 
289 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
290 
291 	inet->inet_dport = usin->sin6_port;
292 
293 	tcp_set_state(sk, TCP_SYN_SENT);
294 	err = inet6_hash_connect(&tcp_death_row, sk);
295 	if (err)
296 		goto late_failure;
297 
298 	if (!tp->write_seq && likely(!tp->repair))
299 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
300 							     np->daddr.s6_addr32,
301 							     inet->inet_sport,
302 							     inet->inet_dport);
303 
304 	err = tcp_connect(sk);
305 	if (err)
306 		goto late_failure;
307 
308 	return 0;
309 
310 late_failure:
311 	tcp_set_state(sk, TCP_CLOSE);
312 	__sk_dst_reset(sk);
313 failure:
314 	inet->inet_dport = 0;
315 	sk->sk_route_caps = 0;
316 	return err;
317 }
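
/* For illustration, a hypothetical userspace sketch (not part of this
 * file, error handling omitted) that exercises the v4-mapped branch in
 * tcp_v6_connect() above; "::ffff:127.0.0.1" classifies as
 * IPV6_ADDR_MAPPED, so the connect is handed to tcp_v4_connect():
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *
 *	inet_pton(AF_INET6, "::ffff:127.0.0.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */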
318 
319 static void tcp_v6_mtu_reduced(struct sock *sk)
320 {
321 	struct dst_entry *dst;
322 
323 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
324 		return;
325 
326 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
327 	if (!dst)
328 		return;
329 
330 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
331 		tcp_sync_mss(sk, dst_mtu(dst));
332 		tcp_simple_retransmit(sk);
333 	}
334 }
335 
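/* ICMPv6 error handler.  Depending on socket state this updates PMTU
 * (deferred via tsq_flags if the socket is owned by the user), follows
 * a redirect, drops a failed request_sock, or reports the converted
 * error to the application.
 */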
336 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
337 		u8 type, u8 code, int offset, __be32 info)
338 {
339 	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
340 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
341 	struct ipv6_pinfo *np;
342 	struct sock *sk;
343 	int err;
344 	struct tcp_sock *tp;
345 	__u32 seq;
346 	struct net *net = dev_net(skb->dev);
347 
348 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
349 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
350 
351 	if (sk == NULL) {
352 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
353 				   ICMP6_MIB_INERRORS);
354 		return;
355 	}
356 
357 	if (sk->sk_state == TCP_TIME_WAIT) {
358 		inet_twsk_put(inet_twsk(sk));
359 		return;
360 	}
361 
362 	bh_lock_sock(sk);
363 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
364 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
365 
366 	if (sk->sk_state == TCP_CLOSE)
367 		goto out;
368 
369 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
370 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
371 		goto out;
372 	}
373 
374 	tp = tcp_sk(sk);
375 	seq = ntohl(th->seq);
376 	if (sk->sk_state != TCP_LISTEN &&
377 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
378 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
379 		goto out;
380 	}
381 
382 	np = inet6_sk(sk);
383 
384 	if (type == NDISC_REDIRECT) {
385 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
386 
387 		if (dst)
388 			dst->ops->redirect(dst, sk, skb);
389 	}
390 
391 	if (type == ICMPV6_PKT_TOOBIG) {
392 		tp->mtu_info = ntohl(info);
393 		if (!sock_owned_by_user(sk))
394 			tcp_v6_mtu_reduced(sk);
395 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
396 					   &tp->tsq_flags))
397 			sock_hold(sk);
398 		goto out;
399 	}
400 
401 	icmpv6_err_convert(type, code, &err);
402 
403 	/* Might be for a request_sock */
404 	switch (sk->sk_state) {
405 		struct request_sock *req, **prev;
406 	case TCP_LISTEN:
407 		if (sock_owned_by_user(sk))
408 			goto out;
409 
410 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
411 					   &hdr->saddr, inet6_iif(skb));
412 		if (!req)
413 			goto out;
414 
415 		/* ICMPs are not backlogged, hence we cannot get
416 		 * an established socket here.
417 		 */
418 		WARN_ON(req->sk != NULL);
419 
420 		if (seq != tcp_rsk(req)->snt_isn) {
421 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
422 			goto out;
423 		}
424 
425 		inet_csk_reqsk_queue_drop(sk, req, prev);
426 		goto out;
427 
428 	case TCP_SYN_SENT:
429 	case TCP_SYN_RECV:  /* Cannot happen.
430 			       It can, if SYNs are crossed. --ANK */
431 		if (!sock_owned_by_user(sk)) {
432 			sk->sk_err = err;
433 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
434 
435 			tcp_done(sk);
436 		} else
437 			sk->sk_err_soft = err;
438 		goto out;
439 	}
440 
441 	if (!sock_owned_by_user(sk) && np->recverr) {
442 		sk->sk_err = err;
443 		sk->sk_error_report(sk);
444 	} else
445 		sk->sk_err_soft = err;
446 
447 out:
448 	bh_unlock_sock(sk);
449 	sock_put(sk);
450 }
451 
452 
453 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
454 			      struct flowi6 *fl6,
455 			      struct request_sock *req,
456 			      struct request_values *rvp,
457 			      u16 queue_mapping)
458 {
459 	struct inet6_request_sock *treq = inet6_rsk(req);
460 	struct ipv6_pinfo *np = inet6_sk(sk);
461 	struct sk_buff *skb;
462 	int err = -ENOMEM;
463 
464 	/* First, grab a route. */
465 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
466 		goto done;
467 
468 	skb = tcp_make_synack(sk, dst, req, rvp, NULL);
469 
470 	if (skb) {
471 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
472 
473 		fl6->daddr = treq->rmt_addr;
474 		skb_set_queue_mapping(skb, queue_mapping);
475 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
476 		err = net_xmit_eval(err);
477 	}
478 
479 done:
480 	return err;
481 }
482 
483 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
484 			     struct request_values *rvp)
485 {
486 	struct flowi6 fl6;
487 	int res;
488 
489 	res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
490 	if (!res)
491 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
492 	return res;
493 }
494 
495 static void tcp_v6_reqsk_destructor(struct request_sock *req)
496 {
497 	kfree_skb(inet6_rsk(req)->pktopts);
498 }
499 
500 #ifdef CONFIG_TCP_MD5SIG
501 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
502 						   const struct in6_addr *addr)
503 {
504 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
505 }
506 
507 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
508 						struct sock *addr_sk)
509 {
510 	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
511 }
512 
513 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
514 						      struct request_sock *req)
515 {
516 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
517 }
518 
519 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
520 				  int optlen)
521 {
522 	struct tcp_md5sig cmd;
523 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
524 
525 	if (optlen < sizeof(cmd))
526 		return -EINVAL;
527 
528 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
529 		return -EFAULT;
530 
531 	if (sin6->sin6_family != AF_INET6)
532 		return -EINVAL;
533 
534 	if (!cmd.tcpm_keylen) {
535 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
536 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
537 					      AF_INET);
538 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
539 				      AF_INET6);
540 	}
541 
542 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
543 		return -EINVAL;
544 
545 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
546 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
547 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
548 
549 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
550 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
551 }
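
/* For illustration, the userspace counterpart of the parser above
 * (hypothetical sketch, not part of this file; error checks omitted):
 * install an MD5 key for a peer with the TCP_MD5SIG socket option.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */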
552 
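/* Mix the RFC 2460 TCP pseudo-header (source address, destination
 * address, upper-layer length, next header = IPPROTO_TCP) into the MD5
 * state before the TCP header and payload themselves are hashed.
 */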
553 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
554 					const struct in6_addr *daddr,
555 					const struct in6_addr *saddr, int nbytes)
556 {
557 	struct tcp6_pseudohdr *bp;
558 	struct scatterlist sg;
559 
560 	bp = &hp->md5_blk.ip6;
561 	/* 1. TCP pseudo-header (RFC2460) */
562 	bp->saddr = *saddr;
563 	bp->daddr = *daddr;
564 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
565 	bp->len = cpu_to_be32(nbytes);
566 
567 	sg_init_one(&sg, bp, sizeof(*bp));
568 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
569 }
570 
571 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
572 			       const struct in6_addr *daddr, struct in6_addr *saddr,
573 			       const struct tcphdr *th)
574 {
575 	struct tcp_md5sig_pool *hp;
576 	struct hash_desc *desc;
577 
578 	hp = tcp_get_md5sig_pool();
579 	if (!hp)
580 		goto clear_hash_noput;
581 	desc = &hp->md5_desc;
582 
583 	if (crypto_hash_init(desc))
584 		goto clear_hash;
585 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
586 		goto clear_hash;
587 	if (tcp_md5_hash_header(hp, th))
588 		goto clear_hash;
589 	if (tcp_md5_hash_key(hp, key))
590 		goto clear_hash;
591 	if (crypto_hash_final(desc, md5_hash))
592 		goto clear_hash;
593 
594 	tcp_put_md5sig_pool();
595 	return 0;
596 
597 clear_hash:
598 	tcp_put_md5sig_pool();
599 clear_hash_noput:
600 	memset(md5_hash, 0, 16);
601 	return 1;
602 }
603 
604 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
605 			       const struct sock *sk,
606 			       const struct request_sock *req,
607 			       const struct sk_buff *skb)
608 {
609 	const struct in6_addr *saddr, *daddr;
610 	struct tcp_md5sig_pool *hp;
611 	struct hash_desc *desc;
612 	const struct tcphdr *th = tcp_hdr(skb);
613 
614 	if (sk) {
615 		saddr = &inet6_sk(sk)->saddr;
616 		daddr = &inet6_sk(sk)->daddr;
617 	} else if (req) {
618 		saddr = &inet6_rsk(req)->loc_addr;
619 		daddr = &inet6_rsk(req)->rmt_addr;
620 	} else {
621 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
622 		saddr = &ip6h->saddr;
623 		daddr = &ip6h->daddr;
624 	}
625 
626 	hp = tcp_get_md5sig_pool();
627 	if (!hp)
628 		goto clear_hash_noput;
629 	desc = &hp->md5_desc;
630 
631 	if (crypto_hash_init(desc))
632 		goto clear_hash;
633 
634 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
635 		goto clear_hash;
636 	if (tcp_md5_hash_header(hp, th))
637 		goto clear_hash;
638 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
639 		goto clear_hash;
640 	if (tcp_md5_hash_key(hp, key))
641 		goto clear_hash;
642 	if (crypto_hash_final(desc, md5_hash))
643 		goto clear_hash;
644 
645 	tcp_put_md5sig_pool();
646 	return 0;
647 
648 clear_hash:
649 	tcp_put_md5sig_pool();
650 clear_hash_noput:
651 	memset(md5_hash, 0, 16);
652 	return 1;
653 }
654 
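/* Validate the RFC 2385 TCP-MD5 option on an inbound segment.  Returns
 * 0 to accept and 1 to drop: drop if a key exists but no option was
 * sent, if an option was sent but no key exists, or if the recomputed
 * digest does not match the one in the segment.
 */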
655 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
656 {
657 	const __u8 *hash_location = NULL;
658 	struct tcp_md5sig_key *hash_expected;
659 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
660 	const struct tcphdr *th = tcp_hdr(skb);
661 	int genhash;
662 	u8 newhash[16];
663 
664 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
665 	hash_location = tcp_parse_md5sig_option(th);
666 
667 	/* We've parsed the options - do we have a hash? */
668 	if (!hash_expected && !hash_location)
669 		return 0;
670 
671 	if (hash_expected && !hash_location) {
672 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
673 		return 1;
674 	}
675 
676 	if (!hash_expected && hash_location) {
677 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
678 		return 1;
679 	}
680 
681 	/* check the signature */
682 	genhash = tcp_v6_md5_hash_skb(newhash,
683 				      hash_expected,
684 				      NULL, NULL, skb);
685 
686 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
687 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
688 				     genhash ? "failed" : "mismatch",
689 				     &ip6h->saddr, ntohs(th->source),
690 				     &ip6h->daddr, ntohs(th->dest));
691 		return 1;
692 	}
693 	return 0;
694 }
695 #endif
696 
697 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
698 	.family		=	AF_INET6,
699 	.obj_size	=	sizeof(struct tcp6_request_sock),
700 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
701 	.send_ack	=	tcp_v6_reqsk_send_ack,
702 	.destructor	=	tcp_v6_reqsk_destructor,
703 	.send_reset	=	tcp_v6_send_reset,
704 	.syn_ack_timeout = 	tcp_syn_ack_timeout,
705 };
706 
707 #ifdef CONFIG_TCP_MD5SIG
708 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
709 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
710 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
711 };
712 #endif
713 
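/* Build and send a bare ACK or RST on the per-netns control socket,
 * echoing the incoming skb's addresses and ports with source and
 * destination swapped; timestamp and MD5 options are appended right
 * after the TCP header when requested.
 */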
714 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
715 				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
716 {
717 	const struct tcphdr *th = tcp_hdr(skb);
718 	struct tcphdr *t1;
719 	struct sk_buff *buff;
720 	struct flowi6 fl6;
721 	struct net *net = dev_net(skb_dst(skb)->dev);
722 	struct sock *ctl_sk = net->ipv6.tcp_sk;
723 	unsigned int tot_len = sizeof(struct tcphdr);
724 	struct dst_entry *dst;
725 	__be32 *topt;
726 
727 	if (ts)
728 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
729 #ifdef CONFIG_TCP_MD5SIG
730 	if (key)
731 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
732 #endif
733 
734 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
735 			 GFP_ATOMIC);
736 	if (buff == NULL)
737 		return;
738 
739 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
740 
741 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
742 	skb_reset_transport_header(buff);
743 
744 	/* Swap the send and the receive. */
745 	memset(t1, 0, sizeof(*t1));
746 	t1->dest = th->source;
747 	t1->source = th->dest;
748 	t1->doff = tot_len / 4;
749 	t1->seq = htonl(seq);
750 	t1->ack_seq = htonl(ack);
751 	t1->ack = !rst || !th->ack;
752 	t1->rst = rst;
753 	t1->window = htons(win);
754 
755 	topt = (__be32 *)(t1 + 1);
756 
757 	if (ts) {
758 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
759 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
760 		*topt++ = htonl(tcp_time_stamp);
761 		*topt++ = htonl(ts);
762 	}
763 
764 #ifdef CONFIG_TCP_MD5SIG
765 	if (key) {
766 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
767 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
768 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
769 				    &ipv6_hdr(skb)->saddr,
770 				    &ipv6_hdr(skb)->daddr, t1);
771 	}
772 #endif
773 
774 	memset(&fl6, 0, sizeof(fl6));
775 	fl6.daddr = ipv6_hdr(skb)->saddr;
776 	fl6.saddr = ipv6_hdr(skb)->daddr;
777 
778 	buff->ip_summed = CHECKSUM_PARTIAL;
779 	buff->csum = 0;
780 
781 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
782 
783 	fl6.flowi6_proto = IPPROTO_TCP;
784 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
785 		fl6.flowi6_oif = inet6_iif(skb);
786 	fl6.fl6_dport = t1->dest;
787 	fl6.fl6_sport = t1->source;
788 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
789 
790 	/* Pass a socket to ip6_dst_lookup whether this reply is a RST or
791 	 * an ACK; the underlying function will use it to retrieve the
792 	 * network namespace.
793 	 */
794 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
795 	if (!IS_ERR(dst)) {
796 		skb_dst_set(buff, dst);
797 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
798 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
799 		if (rst)
800 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
801 		return;
802 	}
803 
804 	kfree_skb(buff);
805 }
806 
807 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
808 {
809 	const struct tcphdr *th = tcp_hdr(skb);
810 	u32 seq = 0, ack_seq = 0;
811 	struct tcp_md5sig_key *key = NULL;
812 #ifdef CONFIG_TCP_MD5SIG
813 	const __u8 *hash_location = NULL;
814 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
815 	unsigned char newhash[16];
816 	int genhash;
817 	struct sock *sk1 = NULL;
818 #endif
819 
820 	if (th->rst)
821 		return;
822 
823 	if (!ipv6_unicast_destination(skb))
824 		return;
825 
826 #ifdef CONFIG_TCP_MD5SIG
827 	hash_location = tcp_parse_md5sig_option(th);
828 	if (!sk && hash_location) {
829 		/*
830 		 * The active side is lost. Try to find the listening socket
831 		 * through the source port, and then find the md5 key through
832 		 * the listening socket. We do not lose security here:
833 		 * the incoming packet is checked against the found key's md5
834 		 * hash, and no RST is generated if the hash doesn't match.
835 		 */
836 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
837 					   &tcp_hashinfo, &ipv6h->daddr,
838 					   ntohs(th->source), inet6_iif(skb));
839 		if (!sk1)
840 			return;
841 
842 		rcu_read_lock();
843 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
844 		if (!key)
845 			goto release_sk1;
846 
847 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
848 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
849 			goto release_sk1;
850 	} else {
851 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
852 	}
853 #endif
854 
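	/* Per RFC 793: if the incoming segment carried an ACK, the RST
	 * uses that value as its sequence number; otherwise the RST
	 * acknowledges everything the segment occupied in sequence
	 * space (SYN and FIN each count as one octet).
	 */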
855 	if (th->ack)
856 		seq = ntohl(th->ack_seq);
857 	else
858 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
859 			  (th->doff << 2);
860 
861 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
862 
863 #ifdef CONFIG_TCP_MD5SIG
864 release_sk1:
865 	if (sk1) {
866 		rcu_read_unlock();
867 		sock_put(sk1);
868 	}
869 #endif
870 }
871 
872 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
873 			    struct tcp_md5sig_key *key, u8 tclass)
874 {
875 	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
876 }
877 
878 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
879 {
880 	struct inet_timewait_sock *tw = inet_twsk(sk);
881 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
882 
883 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
884 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
885 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
886 			tw->tw_tclass);
887 
888 	inet_twsk_put(tw);
889 }
890 
891 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
892 				  struct request_sock *req)
893 {
894 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
895 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
896 }
897 
898 
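/* Match an incoming segment on a listener to a pending request_sock,
 * an already established child, or (with syncookies) a cookie-carrying
 * ACK; returning the listener itself means "no match, treat as a new
 * connection attempt".
 */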
899 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
900 {
901 	struct request_sock *req, **prev;
902 	const struct tcphdr *th = tcp_hdr(skb);
903 	struct sock *nsk;
904 
905 	/* Find possible connection requests. */
906 	req = inet6_csk_search_req(sk, &prev, th->source,
907 				   &ipv6_hdr(skb)->saddr,
908 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
909 	if (req)
910 		return tcp_check_req(sk, skb, req, prev, false);
911 
912 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
913 			&ipv6_hdr(skb)->saddr, th->source,
914 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
915 
916 	if (nsk) {
917 		if (nsk->sk_state != TCP_TIME_WAIT) {
918 			bh_lock_sock(nsk);
919 			return nsk;
920 		}
921 		inet_twsk_put(inet_twsk(nsk));
922 		return NULL;
923 	}
924 
925 #ifdef CONFIG_SYN_COOKIES
926 	if (!th->syn)
927 		sk = cookie_v6_check(sk, skb);
928 #endif
929 	return sk;
930 }
931 
932 /* FIXME: this is substantially similar to the ipv4 code.
933  * Can some kind of merge be done? -- erics
934  */
935 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
936 {
937 	struct tcp_extend_values tmp_ext;
938 	struct tcp_options_received tmp_opt;
939 	const u8 *hash_location;
940 	struct request_sock *req;
941 	struct inet6_request_sock *treq;
942 	struct ipv6_pinfo *np = inet6_sk(sk);
943 	struct tcp_sock *tp = tcp_sk(sk);
944 	__u32 isn = TCP_SKB_CB(skb)->when;
945 	struct dst_entry *dst = NULL;
946 	struct flowi6 fl6;
947 	bool want_cookie = false;
948 
949 	if (skb->protocol == htons(ETH_P_IP))
950 		return tcp_v4_conn_request(sk, skb);
951 
952 	if (!ipv6_unicast_destination(skb))
953 		goto drop;
954 
955 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
956 		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
957 		if (!want_cookie)
958 			goto drop;
959 	}
960 
961 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
962 		goto drop;
963 
964 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
965 	if (req == NULL)
966 		goto drop;
967 
968 #ifdef CONFIG_TCP_MD5SIG
969 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
970 #endif
971 
972 	tcp_clear_options(&tmp_opt);
973 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
974 	tmp_opt.user_mss = tp->rx_opt.user_mss;
975 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
976 
977 	if (tmp_opt.cookie_plus > 0 &&
978 	    tmp_opt.saw_tstamp &&
979 	    !tp->rx_opt.cookie_out_never &&
980 	    (sysctl_tcp_cookie_size > 0 ||
981 	     (tp->cookie_values != NULL &&
982 	      tp->cookie_values->cookie_desired > 0))) {
983 		u8 *c;
984 		u32 *d;
985 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
986 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
987 
988 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
989 			goto drop_and_free;
990 
991 		/* Secret recipe starts with IP addresses */
992 		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
993 		*mess++ ^= *d++;
994 		*mess++ ^= *d++;
995 		*mess++ ^= *d++;
996 		*mess++ ^= *d++;
997 		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
998 		*mess++ ^= *d++;
999 		*mess++ ^= *d++;
1000 		*mess++ ^= *d++;
1001 		*mess++ ^= *d++;
1002 
1003 		/* plus variable length Initiator Cookie */
1004 		c = (u8 *)mess;
1005 		while (l-- > 0)
1006 			*c++ ^= *hash_location++;
1007 
1008 		want_cookie = false;	/* not our kind of cookie */
1009 		tmp_ext.cookie_out_never = 0; /* false */
1010 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1011 	} else if (!tp->rx_opt.cookie_in_always) {
1012 		/* redundant indications, but ensure initialization. */
1013 		tmp_ext.cookie_out_never = 1; /* true */
1014 		tmp_ext.cookie_plus = 0;
1015 	} else {
1016 		goto drop_and_free;
1017 	}
1018 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1019 
1020 	if (want_cookie && !tmp_opt.saw_tstamp)
1021 		tcp_clear_options(&tmp_opt);
1022 
1023 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1024 	tcp_openreq_init(req, &tmp_opt, skb);
1025 
1026 	treq = inet6_rsk(req);
1027 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1028 	treq->loc_addr = ipv6_hdr(skb)->daddr;
1029 	if (!want_cookie || tmp_opt.tstamp_ok)
1030 		TCP_ECN_create_request(req, skb);
1031 
1032 	treq->iif = sk->sk_bound_dev_if;
1033 
1034 	/* So that link locals have meaning */
1035 	if (!sk->sk_bound_dev_if &&
1036 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1037 		treq->iif = inet6_iif(skb);
1038 
1039 	if (!isn) {
1040 		if (ipv6_opt_accepted(sk, skb) ||
1041 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1042 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1043 			atomic_inc(&skb->users);
1044 			treq->pktopts = skb;
1045 		}
1046 
1047 		if (want_cookie) {
1048 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1049 			req->cookie_ts = tmp_opt.tstamp_ok;
1050 			goto have_isn;
1051 		}
1052 
1053 		/* VJ's idea. We save the last timestamp seen from
1054 		 * the destination in the peer table when entering
1055 		 * TIME-WAIT state, and check against it before
1056 		 * accepting a new connection request.
1057 		 *
1058 		 * If "isn" is not zero, this request hit an alive
1059 		 * timewait bucket, so all the necessary checks
1060 		 * are made in the function processing the timewait state.
1061 		 */
1062 		if (tmp_opt.saw_tstamp &&
1063 		    tcp_death_row.sysctl_tw_recycle &&
1064 		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1065 			if (!tcp_peer_is_proven(req, dst, true)) {
1066 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1067 				goto drop_and_release;
1068 			}
1069 		}
1070 		/* Kill the following clause, if you dislike this way. */
1071 		else if (!sysctl_tcp_syncookies &&
1072 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1073 			  (sysctl_max_syn_backlog >> 2)) &&
1074 			 !tcp_peer_is_proven(req, dst, false)) {
1075 			/* Without syncookies, the last quarter of the
1076 			 * backlog is reserved for destinations proven
1077 			 * to be alive.
1078 			 * This means that we keep communicating with
1079 			 * destinations already remembered at the moment
1080 			 * the synflood started.
1081 			 */
1082 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1083 				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1084 			goto drop_and_release;
1085 		}
1086 
1087 		isn = tcp_v6_init_sequence(skb);
1088 	}
1089 have_isn:
1090 	tcp_rsk(req)->snt_isn = isn;
1091 
1092 	if (security_inet_conn_request(sk, skb, req))
1093 		goto drop_and_release;
1094 
1095 	if (tcp_v6_send_synack(sk, dst, &fl6, req,
1096 			       (struct request_values *)&tmp_ext,
1097 			       skb_get_queue_mapping(skb)) ||
1098 	    want_cookie)
1099 		goto drop_and_free;
1100 
1101 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1102 	tcp_rsk(req)->listener = NULL;
1103 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1104 	return 0;
1105 
1106 drop_and_release:
1107 	dst_release(dst);
1108 drop_and_free:
1109 	reqsk_free(req);
1110 drop:
1111 	return 0; /* don't send reset */
1112 }
1113 
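/* Create the child socket once the handshake completes.  v4-mapped
 * connections are delegated to tcp_v4_syn_recv_sock() and then given
 * the mapped af_ops; native IPv6 children inherit addresses, options,
 * the cached route and, if present, the listener's MD5 key.
 */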
1114 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1115 					  struct request_sock *req,
1116 					  struct dst_entry *dst)
1117 {
1118 	struct inet6_request_sock *treq;
1119 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1120 	struct tcp6_sock *newtcp6sk;
1121 	struct inet_sock *newinet;
1122 	struct tcp_sock *newtp;
1123 	struct sock *newsk;
1124 #ifdef CONFIG_TCP_MD5SIG
1125 	struct tcp_md5sig_key *key;
1126 #endif
1127 	struct flowi6 fl6;
1128 
1129 	if (skb->protocol == htons(ETH_P_IP)) {
1130 		/*
1131 		 *	v6 mapped
1132 		 */
1133 
1134 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1135 
1136 		if (newsk == NULL)
1137 			return NULL;
1138 
1139 		newtcp6sk = (struct tcp6_sock *)newsk;
1140 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1141 
1142 		newinet = inet_sk(newsk);
1143 		newnp = inet6_sk(newsk);
1144 		newtp = tcp_sk(newsk);
1145 
1146 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1147 
1148 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1149 
1150 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1151 
1152 		newnp->rcv_saddr = newnp->saddr;
1153 
1154 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1155 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1156 #ifdef CONFIG_TCP_MD5SIG
1157 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1158 #endif
1159 
1160 		newnp->ipv6_ac_list = NULL;
1161 		newnp->ipv6_fl_list = NULL;
1162 		newnp->pktoptions  = NULL;
1163 		newnp->opt	   = NULL;
1164 		newnp->mcast_oif   = inet6_iif(skb);
1165 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1166 		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
1167 
1168 		/*
1169 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1170 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1171 		 * that function for the gory details. -acme
1172 		 */
1173 
1174 		/* This is a tricky place. Until this moment the IPv4 tcp
1175 		   code worked with the IPv6 icsk.icsk_af_ops.
1176 		   Sync it now.
1177 		 */
1178 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1179 
1180 		return newsk;
1181 	}
1182 
1183 	treq = inet6_rsk(req);
1184 
1185 	if (sk_acceptq_is_full(sk))
1186 		goto out_overflow;
1187 
1188 	if (!dst) {
1189 		dst = inet6_csk_route_req(sk, &fl6, req);
1190 		if (!dst)
1191 			goto out;
1192 	}
1193 
1194 	newsk = tcp_create_openreq_child(sk, req, skb);
1195 	if (newsk == NULL)
1196 		goto out_nonewsk;
1197 
1198 	/*
1199 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1200 	 * count here, tcp_create_openreq_child now does this for us, see the
1201 	 * comment in that function for the gory details. -acme
1202 	 */
1203 
1204 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1205 	__ip6_dst_store(newsk, dst, NULL, NULL);
1206 	inet6_sk_rx_dst_set(newsk, skb);
1207 
1208 	newtcp6sk = (struct tcp6_sock *)newsk;
1209 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1210 
1211 	newtp = tcp_sk(newsk);
1212 	newinet = inet_sk(newsk);
1213 	newnp = inet6_sk(newsk);
1214 
1215 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1216 
1217 	newnp->daddr = treq->rmt_addr;
1218 	newnp->saddr = treq->loc_addr;
1219 	newnp->rcv_saddr = treq->loc_addr;
1220 	newsk->sk_bound_dev_if = treq->iif;
1221 
1222 	/* Now IPv6 options...
1223 
1224 	   First: no IPv4 options.
1225 	 */
1226 	newinet->inet_opt = NULL;
1227 	newnp->ipv6_ac_list = NULL;
1228 	newnp->ipv6_fl_list = NULL;
1229 
1230 	/* Clone RX bits */
1231 	newnp->rxopt.all = np->rxopt.all;
1232 
1233 	/* Clone pktoptions received with SYN */
1234 	newnp->pktoptions = NULL;
1235 	if (treq->pktopts != NULL) {
1236 		newnp->pktoptions = skb_clone(treq->pktopts,
1237 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1238 		consume_skb(treq->pktopts);
1239 		treq->pktopts = NULL;
1240 		if (newnp->pktoptions)
1241 			skb_set_owner_r(newnp->pktoptions, newsk);
1242 	}
1243 	newnp->opt	  = NULL;
1244 	newnp->mcast_oif  = inet6_iif(skb);
1245 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1246 	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1247 
1248 	/* Clone native IPv6 options from listening socket (if any)
1249 
1250 	   Yes, keeping a reference count would be much more clever,
1251 	   but we do one more thing here: reattach the optmem
1252 	   to newsk.
1253 	 */
1254 	if (np->opt)
1255 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1256 
1257 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1258 	if (newnp->opt)
1259 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1260 						     newnp->opt->opt_flen);
1261 
1262 	tcp_mtup_init(newsk);
1263 	tcp_sync_mss(newsk, dst_mtu(dst));
1264 	newtp->advmss = dst_metric_advmss(dst);
1265 	if (tcp_sk(sk)->rx_opt.user_mss &&
1266 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1267 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1268 
1269 	tcp_initialize_rcv_mss(newsk);
1270 	tcp_synack_rtt_meas(newsk, req);
1271 	newtp->total_retrans = req->num_retrans;
1272 
1273 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1274 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1275 
1276 #ifdef CONFIG_TCP_MD5SIG
1277 	/* Copy over the MD5 key from the original socket */
1278 	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1279 		/* We're using one, so create a matching key
1280 		 * on the newsk structure. If we fail to get
1281 		 * memory, then we end up not copying the key
1282 		 * across. Shucks.
1283 		 */
1284 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1285 			       AF_INET6, key->key, key->keylen,
1286 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1287 	}
1288 #endif
1289 
1290 	if (__inet_inherit_port(sk, newsk) < 0) {
1291 		inet_csk_prepare_forced_close(newsk);
1292 		tcp_done(newsk);
1293 		goto out;
1294 	}
1295 	__inet6_hash(newsk, NULL);
1296 
1297 	return newsk;
1298 
1299 out_overflow:
1300 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1301 out_nonewsk:
1302 	dst_release(dst);
1303 out:
1304 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1305 	return NULL;
1306 }
1307 
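/* Checksum setup for an inbound segment: accept a verified hardware
 * CHECKSUM_COMPLETE immediately, otherwise seed skb->csum with the
 * pseudo-header sum and fully verify short segments (<= 76 bytes) on
 * the spot, deferring the rest until the data is touched.
 */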
1308 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1309 {
1310 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1311 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1312 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1313 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1314 			return 0;
1315 		}
1316 	}
1317 
1318 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1319 					      &ipv6_hdr(skb)->saddr,
1320 					      &ipv6_hdr(skb)->daddr, 0));
1321 
1322 	if (skb->len <= 76) {
1323 		return __skb_checksum_complete(skb);
1324 	}
1325 	return 0;
1326 }
1327 
1328 /* The socket must have its spinlock held when we get
1329  * here.
1330  *
1331  * We have a potential double-lock case here, so even when
1332  * doing backlog processing we use the BH locking scheme.
1333  * This is because we cannot sleep with the original spinlock
1334  * held.
1335  */
1336 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1337 {
1338 	struct ipv6_pinfo *np = inet6_sk(sk);
1339 	struct tcp_sock *tp;
1340 	struct sk_buff *opt_skb = NULL;
1341 
1342 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1343 	   goes to the IPv4 receive handler and is backlogged.
1344 	   From the backlog it always goes here. Kerboom...
1345 	   Fortunately, tcp_rcv_established and rcv_established
1346 	   handle them correctly, but it is not the case with
1347 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1348 	 */
1349 
1350 	if (skb->protocol == htons(ETH_P_IP))
1351 		return tcp_v4_do_rcv(sk, skb);
1352 
1353 #ifdef CONFIG_TCP_MD5SIG
1354 	if (tcp_v6_inbound_md5_hash(sk, skb))
1355 		goto discard;
1356 #endif
1357 
1358 	if (sk_filter(sk, skb))
1359 		goto discard;
1360 
1361 	/*
1362 	 *	socket locking is here for SMP purposes as backlog rcv
1363 	 *	is currently called with bh processing disabled.
1364 	 */
1365 
1366 	/* Do Stevens' IPV6_PKTOPTIONS.
1367 
1368 	   Yes, guys, it is the only place in our code where we
1369 	   can do this without affecting IPv4.
1370 	   The rest of the code is protocol independent,
1371 	   and I do not like the idea of uglifying IPv4.
1372 
1373 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1374 	   looks not very well thought out. For now we latch the
1375 	   options received in the last packet enqueued
1376 	   by tcp. Feel free to propose a better solution.
1377 					       --ANK (980728)
1378 	 */
1379 	if (np->rxopt.all)
1380 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1381 
1382 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1383 		struct dst_entry *dst = sk->sk_rx_dst;
1384 
1385 		sock_rps_save_rxhash(sk, skb);
1386 		if (dst) {
1387 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1388 			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1389 				dst_release(dst);
1390 				sk->sk_rx_dst = NULL;
1391 			}
1392 		}
1393 
1394 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1395 			goto reset;
1396 		if (opt_skb)
1397 			goto ipv6_pktoptions;
1398 		return 0;
1399 	}
1400 
1401 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1402 		goto csum_err;
1403 
1404 	if (sk->sk_state == TCP_LISTEN) {
1405 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1406 		if (!nsk)
1407 			goto discard;
1408 
1409 		/*
1410 		 * Queue it on the new socket if the new socket is active,
1411 		 * otherwise we just short-circuit this and continue with
1412 		 * the new socket.
1413 		 */
1414 		if (nsk != sk) {
1415 			sock_rps_save_rxhash(nsk, skb);
1416 			if (tcp_child_process(sk, nsk, skb))
1417 				goto reset;
1418 			if (opt_skb)
1419 				__kfree_skb(opt_skb);
1420 			return 0;
1421 		}
1422 	} else
1423 		sock_rps_save_rxhash(sk, skb);
1424 
1425 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1426 		goto reset;
1427 	if (opt_skb)
1428 		goto ipv6_pktoptions;
1429 	return 0;
1430 
1431 reset:
1432 	tcp_v6_send_reset(sk, skb);
1433 discard:
1434 	if (opt_skb)
1435 		__kfree_skb(opt_skb);
1436 	kfree_skb(skb);
1437 	return 0;
1438 csum_err:
1439 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1440 	goto discard;
1441 
1442 
1443 ipv6_pktoptions:
1444 	/* Do you ask, what is this?
1445 
1446 	   1. The skb was enqueued by tcp.
1447 	   2. The skb was added to the tail of the read queue, not out of order.
1448 	   3. The socket is not in a passive state.
1449 	   4. Finally, it really contains options which the user wants to receive.
1450 	 */
1451 	tp = tcp_sk(sk);
1452 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1453 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1454 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1455 			np->mcast_oif = inet6_iif(opt_skb);
1456 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1457 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1458 		if (np->rxopt.bits.rxtclass)
1459 			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1460 		if (ipv6_opt_accepted(sk, opt_skb)) {
1461 			skb_set_owner_r(opt_skb, sk);
1462 			opt_skb = xchg(&np->pktoptions, opt_skb);
1463 		} else {
1464 			__kfree_skb(opt_skb);
1465 			opt_skb = xchg(&np->pktoptions, NULL);
1466 		}
1467 	}
1468 
1469 	kfree_skb(opt_skb);
1470 	return 0;
1471 }
1472 
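/* Main IPv6 TCP receive path: validate header and checksum, look the
 * segment up in the socket tables, then either process it inline,
 * feed it to the prequeue, or park it on the backlog when the socket
 * is owned by the user.
 */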
1473 static int tcp_v6_rcv(struct sk_buff *skb)
1474 {
1475 	const struct tcphdr *th;
1476 	const struct ipv6hdr *hdr;
1477 	struct sock *sk;
1478 	int ret;
1479 	struct net *net = dev_net(skb->dev);
1480 
1481 	if (skb->pkt_type != PACKET_HOST)
1482 		goto discard_it;
1483 
1484 	/*
1485 	 *	Count it even if it's bad.
1486 	 */
1487 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1488 
1489 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1490 		goto discard_it;
1491 
1492 	th = tcp_hdr(skb);
1493 
1494 	if (th->doff < sizeof(struct tcphdr)/4)
1495 		goto bad_packet;
1496 	if (!pskb_may_pull(skb, th->doff*4))
1497 		goto discard_it;
1498 
1499 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1500 		goto bad_packet;
1501 
1502 	th = tcp_hdr(skb);
1503 	hdr = ipv6_hdr(skb);
1504 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1505 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1506 				    skb->len - th->doff*4);
1507 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1508 	TCP_SKB_CB(skb)->when = 0;
1509 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1510 	TCP_SKB_CB(skb)->sacked = 0;
1511 
1512 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1513 	if (!sk)
1514 		goto no_tcp_socket;
1515 
1516 process:
1517 	if (sk->sk_state == TCP_TIME_WAIT)
1518 		goto do_time_wait;
1519 
1520 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1521 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1522 		goto discard_and_relse;
1523 	}
1524 
1525 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1526 		goto discard_and_relse;
1527 
1528 	if (sk_filter(sk, skb))
1529 		goto discard_and_relse;
1530 
1531 	skb->dev = NULL;
1532 
1533 	bh_lock_sock_nested(sk);
1534 	ret = 0;
1535 	if (!sock_owned_by_user(sk)) {
1536 #ifdef CONFIG_NET_DMA
1537 		struct tcp_sock *tp = tcp_sk(sk);
1538 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1539 			tp->ucopy.dma_chan = net_dma_find_channel();
1540 		if (tp->ucopy.dma_chan)
1541 			ret = tcp_v6_do_rcv(sk, skb);
1542 		else
1543 #endif
1544 		{
1545 			if (!tcp_prequeue(sk, skb))
1546 				ret = tcp_v6_do_rcv(sk, skb);
1547 		}
1548 	} else if (unlikely(sk_add_backlog(sk, skb,
1549 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1550 		bh_unlock_sock(sk);
1551 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1552 		goto discard_and_relse;
1553 	}
1554 	bh_unlock_sock(sk);
1555 
1556 	sock_put(sk);
1557 	return ret ? -1 : 0;
1558 
1559 no_tcp_socket:
1560 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1561 		goto discard_it;
1562 
1563 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1564 bad_packet:
1565 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1566 	} else {
1567 		tcp_v6_send_reset(NULL, skb);
1568 	}
1569 
1570 discard_it:
1571 
1572 	/*
1573 	 *	Discard frame
1574 	 */
1575 
1576 	kfree_skb(skb);
1577 	return 0;
1578 
1579 discard_and_relse:
1580 	sock_put(sk);
1581 	goto discard_it;
1582 
1583 do_time_wait:
1584 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1585 		inet_twsk_put(inet_twsk(sk));
1586 		goto discard_it;
1587 	}
1588 
1589 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1590 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1591 		inet_twsk_put(inet_twsk(sk));
1592 		goto discard_it;
1593 	}
1594 
1595 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1596 	case TCP_TW_SYN:
1597 	{
1598 		struct sock *sk2;
1599 
1600 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1601 					    &ipv6_hdr(skb)->daddr,
1602 					    ntohs(th->dest), inet6_iif(skb));
1603 		if (sk2 != NULL) {
1604 			struct inet_timewait_sock *tw = inet_twsk(sk);
1605 			inet_twsk_deschedule(tw, &tcp_death_row);
1606 			inet_twsk_put(tw);
1607 			sk = sk2;
1608 			goto process;
1609 		}
1610 		/* Fall through to ACK */
1611 	}
1612 	case TCP_TW_ACK:
1613 		tcp_v6_timewait_ack(sk, skb);
1614 		break;
1615 	case TCP_TW_RST:
1616 		goto no_tcp_socket;
1617 	case TCP_TW_SUCCESS:;
1618 	}
1619 	goto discard_it;
1620 }
1621 
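/* Early demux, run before routing: if the segment belongs to an
 * established socket, attach that socket and reuse its cached rx dst
 * so the normal receive path can skip a route lookup.
 */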
1622 static void tcp_v6_early_demux(struct sk_buff *skb)
1623 {
1624 	const struct ipv6hdr *hdr;
1625 	const struct tcphdr *th;
1626 	struct sock *sk;
1627 
1628 	if (skb->pkt_type != PACKET_HOST)
1629 		return;
1630 
1631 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1632 		return;
1633 
1634 	hdr = ipv6_hdr(skb);
1635 	th = tcp_hdr(skb);
1636 
1637 	if (th->doff < sizeof(struct tcphdr) / 4)
1638 		return;
1639 
1640 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1641 					&hdr->saddr, th->source,
1642 					&hdr->daddr, ntohs(th->dest),
1643 					inet6_iif(skb));
1644 	if (sk) {
1645 		skb->sk = sk;
1646 		skb->destructor = sock_edemux;
1647 		if (sk->sk_state != TCP_TIME_WAIT) {
1648 			struct dst_entry *dst = sk->sk_rx_dst;
1649 
1650 			if (dst)
1651 				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1652 			if (dst &&
1653 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1654 				skb_dst_set_noref(skb, dst);
1655 		}
1656 	}
1657 }
1658 
1659 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1660 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1661 	.twsk_unique	= tcp_twsk_unique,
1662 	.twsk_destructor= tcp_twsk_destructor,
1663 };
1664 
1665 static const struct inet_connection_sock_af_ops ipv6_specific = {
1666 	.queue_xmit	   = inet6_csk_xmit,
1667 	.send_check	   = tcp_v6_send_check,
1668 	.rebuild_header	   = inet6_sk_rebuild_header,
1669 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1670 	.conn_request	   = tcp_v6_conn_request,
1671 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1672 	.net_header_len	   = sizeof(struct ipv6hdr),
1673 	.net_frag_header_len = sizeof(struct frag_hdr),
1674 	.setsockopt	   = ipv6_setsockopt,
1675 	.getsockopt	   = ipv6_getsockopt,
1676 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1677 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1678 	.bind_conflict	   = inet6_csk_bind_conflict,
1679 #ifdef CONFIG_COMPAT
1680 	.compat_setsockopt = compat_ipv6_setsockopt,
1681 	.compat_getsockopt = compat_ipv6_getsockopt,
1682 #endif
1683 };
1684 
1685 #ifdef CONFIG_TCP_MD5SIG
1686 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1687 	.md5_lookup	=	tcp_v6_md5_lookup,
1688 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1689 	.md5_parse	=	tcp_v6_parse_md5_keys,
1690 };
1691 #endif
1692 
1693 /*
1694  *	TCP over IPv4 via INET6 API
1695  */
1696 
1697 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1698 	.queue_xmit	   = ip_queue_xmit,
1699 	.send_check	   = tcp_v4_send_check,
1700 	.rebuild_header	   = inet_sk_rebuild_header,
1701 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1702 	.conn_request	   = tcp_v6_conn_request,
1703 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1704 	.net_header_len	   = sizeof(struct iphdr),
1705 	.setsockopt	   = ipv6_setsockopt,
1706 	.getsockopt	   = ipv6_getsockopt,
1707 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1708 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1709 	.bind_conflict	   = inet6_csk_bind_conflict,
1710 #ifdef CONFIG_COMPAT
1711 	.compat_setsockopt = compat_ipv6_setsockopt,
1712 	.compat_getsockopt = compat_ipv6_getsockopt,
1713 #endif
1714 };
1715 
1716 #ifdef CONFIG_TCP_MD5SIG
1717 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1718 	.md5_lookup	=	tcp_v4_md5_lookup,
1719 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1720 	.md5_parse	=	tcp_v6_parse_md5_keys,
1721 };
1722 #endif
1723 
1724 /* NOTE: A lot of things are set to zero explicitly by the call to
1725  *       sk_alloc(), so they need not be done here.
1726  */
1727 static int tcp_v6_init_sock(struct sock *sk)
1728 {
1729 	struct inet_connection_sock *icsk = inet_csk(sk);
1730 
1731 	tcp_init_sock(sk);
1732 
1733 	icsk->icsk_af_ops = &ipv6_specific;
1734 
1735 #ifdef CONFIG_TCP_MD5SIG
1736 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1737 #endif
1738 
1739 	return 0;
1740 }
1741 
1742 static void tcp_v6_destroy_sock(struct sock *sk)
1743 {
1744 	tcp_v4_destroy_sock(sk);
1745 	inet6_destroy_sock(sk);
1746 }
1747 
1748 #ifdef CONFIG_PROC_FS
1749 /* Proc filesystem TCPv6 sock list dumping. */
1750 static void get_openreq6(struct seq_file *seq,
1751 			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
1752 {
1753 	int ttd = req->expires - jiffies;
1754 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1755 	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1756 
1757 	if (ttd < 0)
1758 		ttd = 0;
1759 
1760 	seq_printf(seq,
1761 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1762 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1763 		   i,
1764 		   src->s6_addr32[0], src->s6_addr32[1],
1765 		   src->s6_addr32[2], src->s6_addr32[3],
1766 		   ntohs(inet_rsk(req)->loc_port),
1767 		   dest->s6_addr32[0], dest->s6_addr32[1],
1768 		   dest->s6_addr32[2], dest->s6_addr32[3],
1769 		   ntohs(inet_rsk(req)->rmt_port),
1770 		   TCP_SYN_RECV,
1771 		   0, 0, /* could print option size, but that is af dependent. */
1772 		   1,   /* timers active (only the expire timer) */
1773 		   jiffies_to_clock_t(ttd),
1774 		   req->num_timeout,
1775 		   from_kuid_munged(seq_user_ns(seq), uid),
1776 		   0,  /* non standard timer */
1777 		   0, /* open_requests have no inode */
1778 		   0, req);
1779 }
1780 
1781 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1782 {
1783 	const struct in6_addr *dest, *src;
1784 	__u16 destp, srcp;
1785 	int timer_active;
1786 	unsigned long timer_expires;
1787 	const struct inet_sock *inet = inet_sk(sp);
1788 	const struct tcp_sock *tp = tcp_sk(sp);
1789 	const struct inet_connection_sock *icsk = inet_csk(sp);
1790 	const struct ipv6_pinfo *np = inet6_sk(sp);
1791 
1792 	dest  = &np->daddr;
1793 	src   = &np->rcv_saddr;
1794 	destp = ntohs(inet->inet_dport);
1795 	srcp  = ntohs(inet->inet_sport);
1796 
1797 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1798 		timer_active	= 1;
1799 		timer_expires	= icsk->icsk_timeout;
1800 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1801 		timer_active	= 4;
1802 		timer_expires	= icsk->icsk_timeout;
1803 	} else if (timer_pending(&sp->sk_timer)) {
1804 		timer_active	= 2;
1805 		timer_expires	= sp->sk_timer.expires;
1806 	} else {
1807 		timer_active	= 0;
1808 		timer_expires = jiffies;
1809 	}
1810 
1811 	seq_printf(seq,
1812 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1813 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1814 		   i,
1815 		   src->s6_addr32[0], src->s6_addr32[1],
1816 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1817 		   dest->s6_addr32[0], dest->s6_addr32[1],
1818 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1819 		   sp->sk_state,
1820 		   tp->write_seq - tp->snd_una,
1821 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1822 		   timer_active,
1823 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
1824 		   icsk->icsk_retransmits,
1825 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1826 		   icsk->icsk_probes_out,
1827 		   sock_i_ino(sp),
1828 		   atomic_read(&sp->sk_refcnt), sp,
1829 		   jiffies_to_clock_t(icsk->icsk_rto),
1830 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1831 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1832 		   tp->snd_cwnd,
1833 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1834 		   );
1835 }
1836 
1837 static void get_timewait6_sock(struct seq_file *seq,
1838 			       struct inet_timewait_sock *tw, int i)
1839 {
1840 	const struct in6_addr *dest, *src;
1841 	__u16 destp, srcp;
1842 	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1843 	long delta = tw->tw_ttd - jiffies;
1844 
1845 	dest = &tw6->tw_v6_daddr;
1846 	src  = &tw6->tw_v6_rcv_saddr;
1847 	destp = ntohs(tw->tw_dport);
1848 	srcp  = ntohs(tw->tw_sport);
1849 
1850 	seq_printf(seq,
1851 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1852 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1853 		   i,
1854 		   src->s6_addr32[0], src->s6_addr32[1],
1855 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1856 		   dest->s6_addr32[0], dest->s6_addr32[1],
1857 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1858 		   tw->tw_substate, 0, 0,
1859 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1860 		   atomic_read(&tw->tw_refcnt), tw);
1861 }
1862 
1863 static int tcp6_seq_show(struct seq_file *seq, void *v)
1864 {
1865 	struct tcp_iter_state *st;
1866 
1867 	if (v == SEQ_START_TOKEN) {
1868 		seq_puts(seq,
1869 			 "  sl  "
1870 			 "local_address                         "
1871 			 "remote_address                        "
1872 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1873 			 "   uid  timeout inode\n");
1874 		goto out;
1875 	}
1876 	st = seq->private;
1877 
1878 	switch (st->state) {
1879 	case TCP_SEQ_STATE_LISTENING:
1880 	case TCP_SEQ_STATE_ESTABLISHED:
1881 		get_tcp6_sock(seq, v, st->num);
1882 		break;
1883 	case TCP_SEQ_STATE_OPENREQ:
1884 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1885 		break;
1886 	case TCP_SEQ_STATE_TIME_WAIT:
1887 		get_timewait6_sock(seq, v, st->num);
1888 		break;
1889 	}
1890 out:
1891 	return 0;
1892 }
1893 
1894 static const struct file_operations tcp6_afinfo_seq_fops = {
1895 	.owner   = THIS_MODULE,
1896 	.open    = tcp_seq_open,
1897 	.read    = seq_read,
1898 	.llseek  = seq_lseek,
1899 	.release = seq_release_net
1900 };
1901 
1902 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1903 	.name		= "tcp6",
1904 	.family		= AF_INET6,
1905 	.seq_fops	= &tcp6_afinfo_seq_fops,
1906 	.seq_ops	= {
1907 		.show		= tcp6_seq_show,
1908 	},
1909 };
1910 
1911 int __net_init tcp6_proc_init(struct net *net)
1912 {
1913 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1914 }
1915 
1916 void tcp6_proc_exit(struct net *net)
1917 {
1918 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1919 }
1920 #endif
1921 
1922 struct proto tcpv6_prot = {
1923 	.name			= "TCPv6",
1924 	.owner			= THIS_MODULE,
1925 	.close			= tcp_close,
1926 	.connect		= tcp_v6_connect,
1927 	.disconnect		= tcp_disconnect,
1928 	.accept			= inet_csk_accept,
1929 	.ioctl			= tcp_ioctl,
1930 	.init			= tcp_v6_init_sock,
1931 	.destroy		= tcp_v6_destroy_sock,
1932 	.shutdown		= tcp_shutdown,
1933 	.setsockopt		= tcp_setsockopt,
1934 	.getsockopt		= tcp_getsockopt,
1935 	.recvmsg		= tcp_recvmsg,
1936 	.sendmsg		= tcp_sendmsg,
1937 	.sendpage		= tcp_sendpage,
1938 	.backlog_rcv		= tcp_v6_do_rcv,
1939 	.release_cb		= tcp_release_cb,
1940 	.mtu_reduced		= tcp_v6_mtu_reduced,
1941 	.hash			= tcp_v6_hash,
1942 	.unhash			= inet_unhash,
1943 	.get_port		= inet_csk_get_port,
1944 	.enter_memory_pressure	= tcp_enter_memory_pressure,
1945 	.sockets_allocated	= &tcp_sockets_allocated,
1946 	.memory_allocated	= &tcp_memory_allocated,
1947 	.memory_pressure	= &tcp_memory_pressure,
1948 	.orphan_count		= &tcp_orphan_count,
1949 	.sysctl_wmem		= sysctl_tcp_wmem,
1950 	.sysctl_rmem		= sysctl_tcp_rmem,
1951 	.max_header		= MAX_TCP_HEADER,
1952 	.obj_size		= sizeof(struct tcp6_sock),
1953 	.slab_flags		= SLAB_DESTROY_BY_RCU,
1954 	.twsk_prot		= &tcp6_timewait_sock_ops,
1955 	.rsk_prot		= &tcp6_request_sock_ops,
1956 	.h.hashinfo		= &tcp_hashinfo,
1957 	.no_autobind		= true,
1958 #ifdef CONFIG_COMPAT
1959 	.compat_setsockopt	= compat_tcp_setsockopt,
1960 	.compat_getsockopt	= compat_tcp_getsockopt,
1961 #endif
1962 #ifdef CONFIG_MEMCG_KMEM
1963 	.proto_cgroup		= tcp_proto_cgroup,
1964 #endif
1965 };
1966 
1967 static const struct inet6_protocol tcpv6_protocol = {
1968 	.early_demux	=	tcp_v6_early_demux,
1969 	.handler	=	tcp_v6_rcv,
1970 	.err_handler	=	tcp_v6_err,
1971 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1972 };
1973 
1974 static struct inet_protosw tcpv6_protosw = {
1975 	.type		=	SOCK_STREAM,
1976 	.protocol	=	IPPROTO_TCP,
1977 	.prot		=	&tcpv6_prot,
1978 	.ops		=	&inet6_stream_ops,
1979 	.no_check	=	0,
1980 	.flags		=	INET_PROTOSW_PERMANENT |
1981 				INET_PROTOSW_ICSK,
1982 };
1983 
1984 static int __net_init tcpv6_net_init(struct net *net)
1985 {
1986 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1987 				    SOCK_RAW, IPPROTO_TCP, net);
1988 }
1989 
1990 static void __net_exit tcpv6_net_exit(struct net *net)
1991 {
1992 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1993 }
1994 
1995 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1996 {
1997 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1998 }
1999 
2000 static struct pernet_operations tcpv6_net_ops = {
2001 	.init	    = tcpv6_net_init,
2002 	.exit	    = tcpv6_net_exit,
2003 	.exit_batch = tcpv6_net_exit_batch,
2004 };
2005 
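/* Register the IPv6 TCP protocol handler and the SOCK_STREAM protosw,
 * and set up the per-namespace control socket used by
 * tcp_v6_send_response() for RSTs and ACKs.
 */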
2006 int __init tcpv6_init(void)
2007 {
2008 	int ret;
2009 
2010 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2011 	if (ret)
2012 		goto out;
2013 
2014 	/* register inet6 protocol */
2015 	ret = inet6_register_protosw(&tcpv6_protosw);
2016 	if (ret)
2017 		goto out_tcpv6_protocol;
2018 
2019 	ret = register_pernet_subsys(&tcpv6_net_ops);
2020 	if (ret)
2021 		goto out_tcpv6_protosw;
2022 out:
2023 	return ret;
2024 
2025 out_tcpv6_protosw:
2026 	inet6_unregister_protosw(&tcpv6_protosw);
2027 out_tcpv6_protocol:
2028 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2029 	goto out;
2030 }
2031 
2032 void tcpv6_exit(void)
2033 {
2034 	unregister_pernet_subsys(&tcpv6_net_ops);
2035 	inet6_unregister_protosw(&tcpv6_protosw);
2036 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2037 }
2038