xref: /linux/net/ipv6/tcp_ipv6.c (revision 25aee3debe0464f6c680173041fa3de30ec9ff54)
1 /*
2  *	TCP over IPv6
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on:
9  *	linux/net/ipv4/tcp.c
10  *	linux/net/ipv4/tcp_input.c
11  *	linux/net/ipv4/tcp_output.c
12  *
13  *	Fixes:
14  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
15  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
16  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
17  *					a single port at the same time.
18  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
19  *
20  *	This program is free software; you can redistribute it and/or
21  *      modify it under the terms of the GNU General Public License
22  *      as published by the Free Software Foundation; either version
23  *      2 of the License, or (at your option) any later version.
24  */
25 
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46 
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66 
67 #include <asm/uaccess.h>
68 
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71 
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74 
75 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 				      struct request_sock *req);
78 
79 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void	__tcp_v6_send_check(struct sk_buff *skb,
81 				    const struct in6_addr *saddr,
82 				    const struct in6_addr *daddr);
83 
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91 						   const struct in6_addr *addr)
92 {
93 	return NULL;
94 }
95 #endif
96 
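/* Hash the socket into the established table.  A v4-mapped socket
 * (icsk_af_ops == &ipv6_mapped) is handed to the IPv4 hash routine;
 * everything else goes into the IPv6 hash with BHs disabled.
 */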
97 static void tcp_v6_hash(struct sock *sk)
98 {
99 	if (sk->sk_state != TCP_CLOSE) {
100 		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101 			tcp_prot.hash(sk);
102 			return;
103 		}
104 		local_bh_disable();
105 		__inet6_hash(sk, NULL);
106 		local_bh_enable();
107 	}
108 }
109 
110 static __inline__ __sum16 tcp_v6_check(int len,
111 				   const struct in6_addr *saddr,
112 				   const struct in6_addr *daddr,
113 				   __wsum base)
114 {
115 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116 }
117 
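/* Derive the initial sequence number for a new connection from the
 * address/port 4-tuple, so that ISNs are hard to predict off-path.
 */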
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120 	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 					    ipv6_hdr(skb)->saddr.s6_addr32,
122 					    tcp_hdr(skb)->dest,
123 					    tcp_hdr(skb)->source);
124 }
125 
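/* Active open: validate the destination, route the flow, pick a source
 * address, bind a local port via inet6_hash_connect() and send the SYN.
 * A v4-mapped destination is delegated to tcp_v4_connect() with the
 * af_ops switched to the mapped variants.
 */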
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 			  int addr_len)
128 {
129 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 	struct inet_sock *inet = inet_sk(sk);
131 	struct inet_connection_sock *icsk = inet_csk(sk);
132 	struct ipv6_pinfo *np = inet6_sk(sk);
133 	struct tcp_sock *tp = tcp_sk(sk);
134 	struct in6_addr *saddr = NULL, *final_p, final;
135 	struct rt6_info *rt;
136 	struct flowi6 fl6;
137 	struct dst_entry *dst;
138 	int addr_type;
139 	int err;
140 
141 	if (addr_len < SIN6_LEN_RFC2133)
142 		return -EINVAL;
143 
144 	if (usin->sin6_family != AF_INET6)
145 		return -EAFNOSUPPORT;
146 
147 	memset(&fl6, 0, sizeof(fl6));
148 
149 	if (np->sndflow) {
150 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
151 		IP6_ECN_flow_init(fl6.flowlabel);
152 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
153 			struct ip6_flowlabel *flowlabel;
154 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 			if (flowlabel == NULL)
156 				return -EINVAL;
157 			usin->sin6_addr = flowlabel->dst;
158 			fl6_sock_release(flowlabel);
159 		}
160 	}
161 
162 	/*
163 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
164 	 */
165 
166 	if (ipv6_addr_any(&usin->sin6_addr))
167 		usin->sin6_addr.s6_addr[15] = 0x1;
168 
169 	addr_type = ipv6_addr_type(&usin->sin6_addr);
170 
171 	if (addr_type & IPV6_ADDR_MULTICAST)
172 		return -ENETUNREACH;
173 
174 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
175 		if (addr_len >= sizeof(struct sockaddr_in6) &&
176 		    usin->sin6_scope_id) {
177 			/* If interface is set while binding, indices
178 			 * must coincide.
179 			 */
180 			if (sk->sk_bound_dev_if &&
181 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
182 				return -EINVAL;
183 
184 			sk->sk_bound_dev_if = usin->sin6_scope_id;
185 		}
186 
187 		/* Connecting to a link-local address requires an interface */
188 		if (!sk->sk_bound_dev_if)
189 			return -EINVAL;
190 	}
191 
192 	if (tp->rx_opt.ts_recent_stamp &&
193 	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 		tp->rx_opt.ts_recent = 0;
195 		tp->rx_opt.ts_recent_stamp = 0;
196 		tp->write_seq = 0;
197 	}
198 
199 	np->daddr = usin->sin6_addr;
200 	np->flow_label = fl6.flowlabel;
201 
202 	/*
203 	 *	TCP over IPv4
204 	 */
205 
206 	if (addr_type == IPV6_ADDR_MAPPED) {
207 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 		struct sockaddr_in sin;
209 
210 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211 
212 		if (__ipv6_only_sock(sk))
213 			return -ENETUNREACH;
214 
215 		sin.sin_family = AF_INET;
216 		sin.sin_port = usin->sin6_port;
217 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218 
219 		icsk->icsk_af_ops = &ipv6_mapped;
220 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223 #endif
224 
225 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226 
227 		if (err) {
228 			icsk->icsk_ext_hdr_len = exthdrlen;
229 			icsk->icsk_af_ops = &ipv6_specific;
230 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 			tp->af_specific = &tcp_sock_ipv6_specific;
233 #endif
234 			goto failure;
235 		} else {
236 			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238 					       &np->rcv_saddr);
239 		}
240 
241 		return err;
242 	}
243 
244 	if (!ipv6_addr_any(&np->rcv_saddr))
245 		saddr = &np->rcv_saddr;
246 
247 	fl6.flowi6_proto = IPPROTO_TCP;
248 	fl6.daddr = np->daddr;
249 	fl6.saddr = saddr ? *saddr : np->saddr;
250 	fl6.flowi6_oif = sk->sk_bound_dev_if;
251 	fl6.flowi6_mark = sk->sk_mark;
252 	fl6.fl6_dport = usin->sin6_port;
253 	fl6.fl6_sport = inet->inet_sport;
254 
255 	final_p = fl6_update_dst(&fl6, np->opt, &final);
256 
257 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258 
259 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 	if (IS_ERR(dst)) {
261 		err = PTR_ERR(dst);
262 		goto failure;
263 	}
264 
265 	if (saddr == NULL) {
266 		saddr = &fl6.saddr;
267 		np->rcv_saddr = *saddr;
268 	}
269 
270 	/* set the source address */
271 	np->saddr = *saddr;
272 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273 
274 	sk->sk_gso_type = SKB_GSO_TCPV6;
275 	__ip6_dst_store(sk, dst, NULL, NULL);
276 
277 	rt = (struct rt6_info *) dst;
278 	if (tcp_death_row.sysctl_tw_recycle &&
279 	    !tp->rx_opt.ts_recent_stamp &&
280 	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
281 		tcp_fetch_timewait_stamp(sk, dst);
282 
283 	icsk->icsk_ext_hdr_len = 0;
284 	if (np->opt)
285 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
286 					  np->opt->opt_nflen);
287 
288 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
289 
290 	inet->inet_dport = usin->sin6_port;
291 
292 	tcp_set_state(sk, TCP_SYN_SENT);
293 	err = inet6_hash_connect(&tcp_death_row, sk);
294 	if (err)
295 		goto late_failure;
296 
297 	if (!tp->write_seq)
298 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
299 							     np->daddr.s6_addr32,
300 							     inet->inet_sport,
301 							     inet->inet_dport);
302 
303 	err = tcp_connect(sk);
304 	if (err)
305 		goto late_failure;
306 
307 	return 0;
308 
309 late_failure:
310 	tcp_set_state(sk, TCP_CLOSE);
311 	__sk_dst_reset(sk);
312 failure:
313 	inet->inet_dport = 0;
314 	sk->sk_route_caps = 0;
315 	return err;
316 }
317 
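/* Deferred PMTU handler: re-check the cached route and shrink the MSS
 * if the new path MTU is below the one we last synced to.
 */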
318 static void tcp_v6_mtu_reduced(struct sock *sk)
319 {
320 	struct dst_entry *dst;
321 
322 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
323 		return;
324 
325 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
326 	if (!dst)
327 		return;
328 
329 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
330 		tcp_sync_mss(sk, dst_mtu(dst));
331 		tcp_simple_retransmit(sk);
332 	}
333 }
334 
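/* ICMPv6 error handler.  Looks up the socket the error refers to,
 * applies redirects and PKT_TOOBIG updates, and reports other errors
 * to the socket (or drops them if the sequence number is out of window).
 */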
335 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
336 		u8 type, u8 code, int offset, __be32 info)
337 {
338 	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
339 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
340 	struct ipv6_pinfo *np;
341 	struct sock *sk;
342 	int err;
343 	struct tcp_sock *tp;
344 	__u32 seq;
345 	struct net *net = dev_net(skb->dev);
346 
347 	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
348 			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
349 
350 	if (sk == NULL) {
351 		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
352 				   ICMP6_MIB_INERRORS);
353 		return;
354 	}
355 
356 	if (sk->sk_state == TCP_TIME_WAIT) {
357 		inet_twsk_put(inet_twsk(sk));
358 		return;
359 	}
360 
361 	bh_lock_sock(sk);
362 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
363 		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
364 
365 	if (sk->sk_state == TCP_CLOSE)
366 		goto out;
367 
368 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
369 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
370 		goto out;
371 	}
372 
373 	tp = tcp_sk(sk);
374 	seq = ntohl(th->seq);
375 	if (sk->sk_state != TCP_LISTEN &&
376 	    !between(seq, tp->snd_una, tp->snd_nxt)) {
377 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
378 		goto out;
379 	}
380 
381 	np = inet6_sk(sk);
382 
383 	if (type == NDISC_REDIRECT) {
384 		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
385 
386 		if (dst)
387 			dst->ops->redirect(dst, sk, skb);
388 	}
389 
390 	if (type == ICMPV6_PKT_TOOBIG) {
391 		tp->mtu_info = ntohl(info);
392 		if (!sock_owned_by_user(sk))
393 			tcp_v6_mtu_reduced(sk);
394 		else
395 			set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
396 		goto out;
397 	}
398 
399 	icmpv6_err_convert(type, code, &err);
400 
401 	/* Might be for a request_sock */
402 	switch (sk->sk_state) {
403 		struct request_sock *req, **prev;
404 	case TCP_LISTEN:
405 		if (sock_owned_by_user(sk))
406 			goto out;
407 
408 		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
409 					   &hdr->saddr, inet6_iif(skb));
410 		if (!req)
411 			goto out;
412 
413 		/* ICMPs are not backlogged, hence we cannot get
414 		 * an established socket here.
415 		 */
416 		WARN_ON(req->sk != NULL);
417 
418 		if (seq != tcp_rsk(req)->snt_isn) {
419 			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
420 			goto out;
421 		}
422 
423 		inet_csk_reqsk_queue_drop(sk, req, prev);
424 		goto out;
425 
426 	case TCP_SYN_SENT:
427 	case TCP_SYN_RECV:  /* Cannot happen.
428 			       It can, if SYNs are crossed. --ANK */
429 		if (!sock_owned_by_user(sk)) {
430 			sk->sk_err = err;
431 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
432 
433 			tcp_done(sk);
434 		} else
435 			sk->sk_err_soft = err;
436 		goto out;
437 	}
438 
439 	if (!sock_owned_by_user(sk) && np->recverr) {
440 		sk->sk_err = err;
441 		sk->sk_error_report(sk);
442 	} else
443 		sk->sk_err_soft = err;
444 
445 out:
446 	bh_unlock_sock(sk);
447 	sock_put(sk);
448 }
449 
450 
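/* Build and transmit a SYN+ACK for a pending connection request,
 * routing it first if the caller did not already supply a dst.
 */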
451 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
452 			      struct flowi6 *fl6,
453 			      struct request_sock *req,
454 			      struct request_values *rvp,
455 			      u16 queue_mapping)
456 {
457 	struct inet6_request_sock *treq = inet6_rsk(req);
458 	struct ipv6_pinfo *np = inet6_sk(sk);
459 	struct sk_buff *skb;
460 	int err = -ENOMEM;
461 
462 	/* First, grab a route. */
463 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
464 		goto done;
465 
466 	skb = tcp_make_synack(sk, dst, req, rvp);
467 
468 	if (skb) {
469 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
470 
471 		fl6->daddr = treq->rmt_addr;
472 		skb_set_queue_mapping(skb, queue_mapping);
473 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
474 		err = net_xmit_eval(err);
475 	}
476 
477 done:
478 	return err;
479 }
480 
481 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
482 			     struct request_values *rvp)
483 {
484 	struct flowi6 fl6;
485 
486 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
487 	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
488 }
489 
490 static void tcp_v6_reqsk_destructor(struct request_sock *req)
491 {
492 	kfree_skb(inet6_rsk(req)->pktopts);
493 }
494 
495 #ifdef CONFIG_TCP_MD5SIG
496 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
497 						   const struct in6_addr *addr)
498 {
499 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
500 }
501 
502 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
503 						struct sock *addr_sk)
504 {
505 	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
506 }
507 
508 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
509 						      struct request_sock *req)
510 {
511 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
512 }
513 
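/* setsockopt(TCP_MD5SIG) handler.  A v4-mapped peer address is stored
 * as an AF_INET key; a zero key length deletes the key.  A minimal
 * userspace sketch (illustrative only; "fd" is an assumed TCP socket
 * and error handling is omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */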
514 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
515 				  int optlen)
516 {
517 	struct tcp_md5sig cmd;
518 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
519 
520 	if (optlen < sizeof(cmd))
521 		return -EINVAL;
522 
523 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
524 		return -EFAULT;
525 
526 	if (sin6->sin6_family != AF_INET6)
527 		return -EINVAL;
528 
529 	if (!cmd.tcpm_keylen) {
530 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
531 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
532 					      AF_INET);
533 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
534 				      AF_INET6);
535 	}
536 
537 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
538 		return -EINVAL;
539 
540 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
541 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
542 				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
543 
544 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
545 			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
546 }
547 
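/* Mix the TCP-over-IPv6 pseudo-header (RFC 2460 addresses, protocol
 * and length) into the MD5 state for an RFC 2385 signature.
 */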
548 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
549 					const struct in6_addr *daddr,
550 					const struct in6_addr *saddr, int nbytes)
551 {
552 	struct tcp6_pseudohdr *bp;
553 	struct scatterlist sg;
554 
555 	bp = &hp->md5_blk.ip6;
556 	/* 1. TCP pseudo-header (RFC2460) */
557 	bp->saddr = *saddr;
558 	bp->daddr = *daddr;
559 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
560 	bp->len = cpu_to_be32(nbytes);
561 
562 	sg_init_one(&sg, bp, sizeof(*bp));
563 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
564 }
565 
566 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
567 			       const struct in6_addr *daddr, struct in6_addr *saddr,
568 			       const struct tcphdr *th)
569 {
570 	struct tcp_md5sig_pool *hp;
571 	struct hash_desc *desc;
572 
573 	hp = tcp_get_md5sig_pool();
574 	if (!hp)
575 		goto clear_hash_noput;
576 	desc = &hp->md5_desc;
577 
578 	if (crypto_hash_init(desc))
579 		goto clear_hash;
580 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
581 		goto clear_hash;
582 	if (tcp_md5_hash_header(hp, th))
583 		goto clear_hash;
584 	if (tcp_md5_hash_key(hp, key))
585 		goto clear_hash;
586 	if (crypto_hash_final(desc, md5_hash))
587 		goto clear_hash;
588 
589 	tcp_put_md5sig_pool();
590 	return 0;
591 
592 clear_hash:
593 	tcp_put_md5sig_pool();
594 clear_hash_noput:
595 	memset(md5_hash, 0, 16);
596 	return 1;
597 }
598 
599 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
600 			       const struct sock *sk,
601 			       const struct request_sock *req,
602 			       const struct sk_buff *skb)
603 {
604 	const struct in6_addr *saddr, *daddr;
605 	struct tcp_md5sig_pool *hp;
606 	struct hash_desc *desc;
607 	const struct tcphdr *th = tcp_hdr(skb);
608 
609 	if (sk) {
610 		saddr = &inet6_sk(sk)->saddr;
611 		daddr = &inet6_sk(sk)->daddr;
612 	} else if (req) {
613 		saddr = &inet6_rsk(req)->loc_addr;
614 		daddr = &inet6_rsk(req)->rmt_addr;
615 	} else {
616 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
617 		saddr = &ip6h->saddr;
618 		daddr = &ip6h->daddr;
619 	}
620 
621 	hp = tcp_get_md5sig_pool();
622 	if (!hp)
623 		goto clear_hash_noput;
624 	desc = &hp->md5_desc;
625 
626 	if (crypto_hash_init(desc))
627 		goto clear_hash;
628 
629 	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
630 		goto clear_hash;
631 	if (tcp_md5_hash_header(hp, th))
632 		goto clear_hash;
633 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
634 		goto clear_hash;
635 	if (tcp_md5_hash_key(hp, key))
636 		goto clear_hash;
637 	if (crypto_hash_final(desc, md5_hash))
638 		goto clear_hash;
639 
640 	tcp_put_md5sig_pool();
641 	return 0;
642 
643 clear_hash:
644 	tcp_put_md5sig_pool();
645 clear_hash_noput:
646 	memset(md5_hash, 0, 16);
647 	return 1;
648 }
649 
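/* Verify the RFC 2385 MD5 signature on an incoming segment.  Returns
 * nonzero (drop) when a key exists but no option was sent, when an
 * unexpected option shows up, or when the recomputed digest differs.
 */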
650 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
651 {
652 	const __u8 *hash_location = NULL;
653 	struct tcp_md5sig_key *hash_expected;
654 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
655 	const struct tcphdr *th = tcp_hdr(skb);
656 	int genhash;
657 	u8 newhash[16];
658 
659 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
660 	hash_location = tcp_parse_md5sig_option(th);
661 
662 	/* We've parsed the options - do we have a hash? */
663 	if (!hash_expected && !hash_location)
664 		return 0;
665 
666 	if (hash_expected && !hash_location) {
667 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
668 		return 1;
669 	}
670 
671 	if (!hash_expected && hash_location) {
672 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
673 		return 1;
674 	}
675 
676 	/* check the signature */
677 	genhash = tcp_v6_md5_hash_skb(newhash,
678 				      hash_expected,
679 				      NULL, NULL, skb);
680 
681 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
682 		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
683 				     genhash ? "failed" : "mismatch",
684 				     &ip6h->saddr, ntohs(th->source),
685 				     &ip6h->daddr, ntohs(th->dest));
686 		return 1;
687 	}
688 	return 0;
689 }
690 #endif
691 
692 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
693 	.family		=	AF_INET6,
694 	.obj_size	=	sizeof(struct tcp6_request_sock),
695 	.rtx_syn_ack	=	tcp_v6_rtx_synack,
696 	.send_ack	=	tcp_v6_reqsk_send_ack,
697 	.destructor	=	tcp_v6_reqsk_destructor,
698 	.send_reset	=	tcp_v6_send_reset,
699 	.syn_ack_timeout =	tcp_syn_ack_timeout,
700 };
701 
702 #ifdef CONFIG_TCP_MD5SIG
703 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
704 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
705 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
706 };
707 #endif
708 
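/* Fill in the TCP checksum field: leave the pseudo-header complement
 * for hardware when CHECKSUM_PARTIAL offload is in use, otherwise
 * finish the sum in software over the header and payload.
 */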
709 static void __tcp_v6_send_check(struct sk_buff *skb,
710 				const struct in6_addr *saddr, const struct in6_addr *daddr)
711 {
712 	struct tcphdr *th = tcp_hdr(skb);
713 
714 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
715 		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
716 		skb->csum_start = skb_transport_header(skb) - skb->head;
717 		skb->csum_offset = offsetof(struct tcphdr, check);
718 	} else {
719 		th->check = tcp_v6_check(skb->len, saddr, daddr,
720 					 csum_partial(th, th->doff << 2,
721 						      skb->csum));
722 	}
723 }
724 
725 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
726 {
727 	struct ipv6_pinfo *np = inet6_sk(sk);
728 
729 	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
730 }
731 
732 static int tcp_v6_gso_send_check(struct sk_buff *skb)
733 {
734 	const struct ipv6hdr *ipv6h;
735 	struct tcphdr *th;
736 
737 	if (!pskb_may_pull(skb, sizeof(*th)))
738 		return -EINVAL;
739 
740 	ipv6h = ipv6_hdr(skb);
741 	th = tcp_hdr(skb);
742 
743 	th->check = 0;
744 	skb->ip_summed = CHECKSUM_PARTIAL;
745 	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
746 	return 0;
747 }
748 
749 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
750 					 struct sk_buff *skb)
751 {
752 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
753 
754 	switch (skb->ip_summed) {
755 	case CHECKSUM_COMPLETE:
756 		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
757 				  skb->csum)) {
758 			skb->ip_summed = CHECKSUM_UNNECESSARY;
759 			break;
760 		}
761 
762 		/* fall through */
763 	case CHECKSUM_NONE:
764 		NAPI_GRO_CB(skb)->flush = 1;
765 		return NULL;
766 	}
767 
768 	return tcp_gro_receive(head, skb);
769 }
770 
771 static int tcp6_gro_complete(struct sk_buff *skb)
772 {
773 	const struct ipv6hdr *iph = ipv6_hdr(skb);
774 	struct tcphdr *th = tcp_hdr(skb);
775 
776 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
777 				  &iph->saddr, &iph->daddr, 0);
778 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
779 
780 	return tcp_gro_complete(skb);
781 }
782 
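/* Build and send a raw reply (RST or ACK) for a received segment
 * without an established socket: the headers are synthesised from the
 * incoming packet and sent via the per-namespace control socket.
 */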
783 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
784 				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
785 {
786 	const struct tcphdr *th = tcp_hdr(skb);
787 	struct tcphdr *t1;
788 	struct sk_buff *buff;
789 	struct flowi6 fl6;
790 	struct net *net = dev_net(skb_dst(skb)->dev);
791 	struct sock *ctl_sk = net->ipv6.tcp_sk;
792 	unsigned int tot_len = sizeof(struct tcphdr);
793 	struct dst_entry *dst;
794 	__be32 *topt;
795 
796 	if (ts)
797 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
798 #ifdef CONFIG_TCP_MD5SIG
799 	if (key)
800 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
801 #endif
802 
803 	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
804 			 GFP_ATOMIC);
805 	if (buff == NULL)
806 		return;
807 
808 	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
809 
810 	t1 = (struct tcphdr *) skb_push(buff, tot_len);
811 	skb_reset_transport_header(buff);
812 
813 	/* Swap the send and the receive. */
814 	memset(t1, 0, sizeof(*t1));
815 	t1->dest = th->source;
816 	t1->source = th->dest;
817 	t1->doff = tot_len / 4;
818 	t1->seq = htonl(seq);
819 	t1->ack_seq = htonl(ack);
820 	t1->ack = !rst || !th->ack;
821 	t1->rst = rst;
822 	t1->window = htons(win);
823 
824 	topt = (__be32 *)(t1 + 1);
825 
826 	if (ts) {
827 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
828 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
829 		*topt++ = htonl(tcp_time_stamp);
830 		*topt++ = htonl(ts);
831 	}
832 
833 #ifdef CONFIG_TCP_MD5SIG
834 	if (key) {
835 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
836 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
837 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
838 				    &ipv6_hdr(skb)->saddr,
839 				    &ipv6_hdr(skb)->daddr, t1);
840 	}
841 #endif
842 
843 	memset(&fl6, 0, sizeof(fl6));
844 	fl6.daddr = ipv6_hdr(skb)->saddr;
845 	fl6.saddr = ipv6_hdr(skb)->daddr;
846 
847 	buff->ip_summed = CHECKSUM_PARTIAL;
848 	buff->csum = 0;
849 
850 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
851 
852 	fl6.flowi6_proto = IPPROTO_TCP;
853 	fl6.flowi6_oif = inet6_iif(skb);
854 	fl6.fl6_dport = t1->dest;
855 	fl6.fl6_sport = t1->source;
856 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
857 
858 	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
859 	 * The underlying function will use it to retrieve the network
860 	 * namespace.
861 	 */
862 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
863 	if (!IS_ERR(dst)) {
864 		skb_dst_set(buff, dst);
865 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
866 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
867 		if (rst)
868 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
869 		return;
870 	}
871 
872 	kfree_skb(buff);
873 }
874 
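/* Send a RST in reply to a bad segment.  For an unmatched segment
 * carrying an MD5 option, the listener is looked up by source port so
 * the RST can be properly signed (see the comment below).
 */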
875 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
876 {
877 	const struct tcphdr *th = tcp_hdr(skb);
878 	u32 seq = 0, ack_seq = 0;
879 	struct tcp_md5sig_key *key = NULL;
880 #ifdef CONFIG_TCP_MD5SIG
881 	const __u8 *hash_location = NULL;
882 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
883 	unsigned char newhash[16];
884 	int genhash;
885 	struct sock *sk1 = NULL;
886 #endif
887 
888 	if (th->rst)
889 		return;
890 
891 	if (!ipv6_unicast_destination(skb))
892 		return;
893 
894 #ifdef CONFIG_TCP_MD5SIG
895 	hash_location = tcp_parse_md5sig_option(th);
896 	if (!sk && hash_location) {
897 		/*
898 		 * The active side is gone. Try to find the listening socket
899 		 * through the source port, and then find the md5 key through
900 		 * the listening socket. We do not lose any security here:
901 		 * the incoming packet is checked against the key we find,
902 		 * and no RST is generated if the md5 hash doesn't match.
903 		 */
904 		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
905 					   &tcp_hashinfo, &ipv6h->daddr,
906 					   ntohs(th->source), inet6_iif(skb));
907 		if (!sk1)
908 			return;
909 
910 		rcu_read_lock();
911 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
912 		if (!key)
913 			goto release_sk1;
914 
915 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
916 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
917 			goto release_sk1;
918 	} else {
919 		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
920 	}
921 #endif
922 
923 	if (th->ack)
924 		seq = ntohl(th->ack_seq);
925 	else
926 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
927 			  (th->doff << 2);
928 
929 	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
930 
931 #ifdef CONFIG_TCP_MD5SIG
932 release_sk1:
933 	if (sk1) {
934 		rcu_read_unlock();
935 		sock_put(sk1);
936 	}
937 #endif
938 }
939 
940 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
941 			    struct tcp_md5sig_key *key, u8 tclass)
942 {
943 	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
944 }
945 
946 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
947 {
948 	struct inet_timewait_sock *tw = inet_twsk(sk);
949 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
950 
951 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
952 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
953 			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
954 			tw->tw_tclass);
955 
956 	inet_twsk_put(tw);
957 }
958 
959 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
960 				  struct request_sock *req)
961 {
962 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
963 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
964 }
965 
966 
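/* A segment arrived on a listening socket: match it against pending
 * connection requests, an already established child, or (if enabled)
 * validate it as a syncookie ACK.
 */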
967 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
968 {
969 	struct request_sock *req, **prev;
970 	const struct tcphdr *th = tcp_hdr(skb);
971 	struct sock *nsk;
972 
973 	/* Find possible connection requests. */
974 	req = inet6_csk_search_req(sk, &prev, th->source,
975 				   &ipv6_hdr(skb)->saddr,
976 				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
977 	if (req)
978 		return tcp_check_req(sk, skb, req, prev);
979 
980 	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
981 			&ipv6_hdr(skb)->saddr, th->source,
982 			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
983 
984 	if (nsk) {
985 		if (nsk->sk_state != TCP_TIME_WAIT) {
986 			bh_lock_sock(nsk);
987 			return nsk;
988 		}
989 		inet_twsk_put(inet_twsk(nsk));
990 		return NULL;
991 	}
992 
993 #ifdef CONFIG_SYN_COOKIES
994 	if (!th->syn)
995 		sk = cookie_v6_check(sk, skb);
996 #endif
997 	return sk;
998 }
999 
1000 /* FIXME: this is substantially similar to the ipv4 code.
1001  * Can some kind of merge be done? -- erics
1002  */
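/* Handle an incoming SYN: allocate a request sock, record the peer's
 * addresses and options, pick an ISN (or a syncookie under pressure)
 * and answer with a SYN+ACK.
 */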
1003 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1004 {
1005 	struct tcp_extend_values tmp_ext;
1006 	struct tcp_options_received tmp_opt;
1007 	const u8 *hash_location;
1008 	struct request_sock *req;
1009 	struct inet6_request_sock *treq;
1010 	struct ipv6_pinfo *np = inet6_sk(sk);
1011 	struct tcp_sock *tp = tcp_sk(sk);
1012 	__u32 isn = TCP_SKB_CB(skb)->when;
1013 	struct dst_entry *dst = NULL;
1014 	struct flowi6 fl6;
1015 	bool want_cookie = false;
1016 
1017 	if (skb->protocol == htons(ETH_P_IP))
1018 		return tcp_v4_conn_request(sk, skb);
1019 
1020 	if (!ipv6_unicast_destination(skb))
1021 		goto drop;
1022 
1023 	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1024 		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1025 		if (!want_cookie)
1026 			goto drop;
1027 	}
1028 
1029 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1030 		goto drop;
1031 
1032 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1033 	if (req == NULL)
1034 		goto drop;
1035 
1036 #ifdef CONFIG_TCP_MD5SIG
1037 	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1038 #endif
1039 
1040 	tcp_clear_options(&tmp_opt);
1041 	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1042 	tmp_opt.user_mss = tp->rx_opt.user_mss;
1043 	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
1044 
1045 	if (tmp_opt.cookie_plus > 0 &&
1046 	    tmp_opt.saw_tstamp &&
1047 	    !tp->rx_opt.cookie_out_never &&
1048 	    (sysctl_tcp_cookie_size > 0 ||
1049 	     (tp->cookie_values != NULL &&
1050 	      tp->cookie_values->cookie_desired > 0))) {
1051 		u8 *c;
1052 		u32 *d;
1053 		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1054 		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1055 
1056 		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1057 			goto drop_and_free;
1058 
1059 		/* Secret recipe starts with IP addresses */
1060 		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1061 		*mess++ ^= *d++;
1062 		*mess++ ^= *d++;
1063 		*mess++ ^= *d++;
1064 		*mess++ ^= *d++;
1065 		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1066 		*mess++ ^= *d++;
1067 		*mess++ ^= *d++;
1068 		*mess++ ^= *d++;
1069 		*mess++ ^= *d++;
1070 
1071 		/* plus variable length Initiator Cookie */
1072 		c = (u8 *)mess;
1073 		while (l-- > 0)
1074 			*c++ ^= *hash_location++;
1075 
1076 		want_cookie = false;	/* not our kind of cookie */
1077 		tmp_ext.cookie_out_never = 0; /* false */
1078 		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1079 	} else if (!tp->rx_opt.cookie_in_always) {
1080 		/* redundant indications, but ensure initialization. */
1081 		tmp_ext.cookie_out_never = 1; /* true */
1082 		tmp_ext.cookie_plus = 0;
1083 	} else {
1084 		goto drop_and_free;
1085 	}
1086 	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1087 
1088 	if (want_cookie && !tmp_opt.saw_tstamp)
1089 		tcp_clear_options(&tmp_opt);
1090 
1091 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1092 	tcp_openreq_init(req, &tmp_opt, skb);
1093 
1094 	treq = inet6_rsk(req);
1095 	treq->rmt_addr = ipv6_hdr(skb)->saddr;
1096 	treq->loc_addr = ipv6_hdr(skb)->daddr;
1097 	if (!want_cookie || tmp_opt.tstamp_ok)
1098 		TCP_ECN_create_request(req, skb);
1099 
1100 	treq->iif = sk->sk_bound_dev_if;
1101 
1102 	/* So that link locals have meaning */
1103 	/* So that link-local addresses have meaning */
1104 	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1105 		treq->iif = inet6_iif(skb);
1106 
1107 	if (!isn) {
1108 		if (ipv6_opt_accepted(sk, skb) ||
1109 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1110 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1111 			atomic_inc(&skb->users);
1112 			treq->pktopts = skb;
1113 		}
1114 
1115 		if (want_cookie) {
1116 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1117 			req->cookie_ts = tmp_opt.tstamp_ok;
1118 			goto have_isn;
1119 		}
1120 
1121 		/* VJ's idea. We save the last timestamp seen
1122 		 * from the destination in the peer table when entering
1123 		 * TIME-WAIT state, and check against it before
1124 		 * accepting a new connection request.
1125 		 *
1126 		 * If "isn" is not zero, this request hit a live
1127 		 * timewait bucket, so all the necessary checks
1128 		 * were already made while processing the timewait state.
1129 		 */
1130 		if (tmp_opt.saw_tstamp &&
1131 		    tcp_death_row.sysctl_tw_recycle &&
1132 		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
1133 			if (!tcp_peer_is_proven(req, dst, true)) {
1134 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1135 				goto drop_and_release;
1136 			}
1137 		}
1138 		/* Remove the following clause if you dislike this heuristic. */
1139 		else if (!sysctl_tcp_syncookies &&
1140 			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1141 			  (sysctl_max_syn_backlog >> 2)) &&
1142 			 !tcp_peer_is_proven(req, dst, false)) {
1143 			/* Without syncookies, the last quarter of the
1144 			 * backlog is reserved for destinations proven
1145 			 * to be alive.
1146 			 * This means we keep communicating with
1147 			 * destinations that were already known at the
1148 			 * moment the synflood started.
1149 			 */
1150 			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1151 				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1152 			goto drop_and_release;
1153 		}
1154 
1155 		isn = tcp_v6_init_sequence(skb);
1156 	}
1157 have_isn:
1158 	tcp_rsk(req)->snt_isn = isn;
1159 	tcp_rsk(req)->snt_synack = tcp_time_stamp;
1160 
1161 	if (security_inet_conn_request(sk, skb, req))
1162 		goto drop_and_release;
1163 
1164 	if (tcp_v6_send_synack(sk, dst, &fl6, req,
1165 			       (struct request_values *)&tmp_ext,
1166 			       skb_get_queue_mapping(skb)) ||
1167 	    want_cookie)
1168 		goto drop_and_free;
1169 
1170 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1171 	return 0;
1172 
1173 drop_and_release:
1174 	dst_release(dst);
1175 drop_and_free:
1176 	reqsk_free(req);
1177 drop:
1178 	return 0; /* don't send reset */
1179 }
1180 
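/* The third step of the three-way handshake: create the child socket
 * for an accepted connection, copying options, routing state and (if
 * configured) the peer's MD5 key from the listener.  A v4-mapped
 * request is delegated to tcp_v4_syn_recv_sock().
 */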
1181 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1182 					  struct request_sock *req,
1183 					  struct dst_entry *dst)
1184 {
1185 	struct inet6_request_sock *treq;
1186 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1187 	struct tcp6_sock *newtcp6sk;
1188 	struct inet_sock *newinet;
1189 	struct tcp_sock *newtp;
1190 	struct sock *newsk;
1191 #ifdef CONFIG_TCP_MD5SIG
1192 	struct tcp_md5sig_key *key;
1193 #endif
1194 	struct flowi6 fl6;
1195 
1196 	if (skb->protocol == htons(ETH_P_IP)) {
1197 		/*
1198 		 *	v6 mapped
1199 		 */
1200 
1201 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1202 
1203 		if (newsk == NULL)
1204 			return NULL;
1205 
1206 		newtcp6sk = (struct tcp6_sock *)newsk;
1207 		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1208 
1209 		newinet = inet_sk(newsk);
1210 		newnp = inet6_sk(newsk);
1211 		newtp = tcp_sk(newsk);
1212 
1213 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1214 
1215 		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1216 
1217 		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1218 
1219 		newnp->rcv_saddr = newnp->saddr;
1220 
1221 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1222 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1223 #ifdef CONFIG_TCP_MD5SIG
1224 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1225 #endif
1226 
1227 		newnp->ipv6_ac_list = NULL;
1228 		newnp->ipv6_fl_list = NULL;
1229 		newnp->pktoptions  = NULL;
1230 		newnp->opt	   = NULL;
1231 		newnp->mcast_oif   = inet6_iif(skb);
1232 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
1233 		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));
1234 
1235 		/*
1236 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1237 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1238 		 * that function for the gory details. -acme
1239 		 */
1240 
1241 		/* This is a tricky place. Until this moment the IPv4 tcp code
1242 		   worked with the IPv6 icsk.icsk_af_ops.
1243 		   Sync it now.
1244 		 */
1245 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1246 
1247 		return newsk;
1248 	}
1249 
1250 	treq = inet6_rsk(req);
1251 
1252 	if (sk_acceptq_is_full(sk))
1253 		goto out_overflow;
1254 
1255 	if (!dst) {
1256 		dst = inet6_csk_route_req(sk, &fl6, req);
1257 		if (!dst)
1258 			goto out;
1259 	}
1260 
1261 	newsk = tcp_create_openreq_child(sk, req, skb);
1262 	if (newsk == NULL)
1263 		goto out_nonewsk;
1264 
1265 	/*
1266 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1267 	 * count here, tcp_create_openreq_child now does this for us, see the
1268 	 * comment in that function for the gory details. -acme
1269 	 */
1270 
1271 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1272 	__ip6_dst_store(newsk, dst, NULL, NULL);
1273 
1274 	newtcp6sk = (struct tcp6_sock *)newsk;
1275 	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1276 
1277 	newtp = tcp_sk(newsk);
1278 	newinet = inet_sk(newsk);
1279 	newnp = inet6_sk(newsk);
1280 
1281 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1282 
1283 	newnp->daddr = treq->rmt_addr;
1284 	newnp->saddr = treq->loc_addr;
1285 	newnp->rcv_saddr = treq->loc_addr;
1286 	newsk->sk_bound_dev_if = treq->iif;
1287 
1288 	/* Now IPv6 options...
1289 
1290 	   First: no IPv4 options.
1291 	 */
1292 	newinet->inet_opt = NULL;
1293 	newnp->ipv6_ac_list = NULL;
1294 	newnp->ipv6_fl_list = NULL;
1295 
1296 	/* Clone RX bits */
1297 	newnp->rxopt.all = np->rxopt.all;
1298 
1299 	/* Clone pktoptions received with SYN */
1300 	newnp->pktoptions = NULL;
1301 	if (treq->pktopts != NULL) {
1302 		newnp->pktoptions = skb_clone(treq->pktopts,
1303 					      sk_gfp_atomic(sk, GFP_ATOMIC));
1304 		consume_skb(treq->pktopts);
1305 		treq->pktopts = NULL;
1306 		if (newnp->pktoptions)
1307 			skb_set_owner_r(newnp->pktoptions, newsk);
1308 	}
1309 	newnp->opt	  = NULL;
1310 	newnp->mcast_oif  = inet6_iif(skb);
1311 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1312 	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1313 
1314 	/* Clone native IPv6 options from listening socket (if any)
1315 
1316 	   Yes, keeping a reference count would be much more clever,
1317 	   but we do one more thing here: reattach optmem
1318 	   to newsk.
1319 	 */
1320 	if (np->opt)
1321 		newnp->opt = ipv6_dup_options(newsk, np->opt);
1322 
1323 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1324 	if (newnp->opt)
1325 		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1326 						     newnp->opt->opt_flen);
1327 
1328 	tcp_mtup_init(newsk);
1329 	tcp_sync_mss(newsk, dst_mtu(dst));
1330 	newtp->advmss = dst_metric_advmss(dst);
1331 	if (tcp_sk(sk)->rx_opt.user_mss &&
1332 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1333 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1334 
1335 	tcp_initialize_rcv_mss(newsk);
1336 	if (tcp_rsk(req)->snt_synack)
1337 		tcp_valid_rtt_meas(newsk,
1338 		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
1339 	newtp->total_retrans = req->retrans;
1340 
1341 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1342 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1343 
1344 #ifdef CONFIG_TCP_MD5SIG
1345 	/* Copy over the MD5 key from the original socket */
1346 	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1347 		/* We're using one, so create a matching key
1348 		 * on the newsk structure. If we fail to get
1349 		 * memory, then we end up not copying the key
1350 		 * across. Shucks.
1351 		 */
1352 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1353 			       AF_INET6, key->key, key->keylen,
1354 			       sk_gfp_atomic(sk, GFP_ATOMIC));
1355 	}
1356 #endif
1357 
1358 	if (__inet_inherit_port(sk, newsk) < 0) {
1359 		sock_put(newsk);
1360 		goto out;
1361 	}
1362 	__inet6_hash(newsk, NULL);
1363 
1364 	return newsk;
1365 
1366 out_overflow:
1367 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1368 out_nonewsk:
1369 	dst_release(dst);
1370 out:
1371 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1372 	return NULL;
1373 }
1374 
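/* Validate the checksum of an incoming segment, trusting a verified
 * CHECKSUM_COMPLETE value from the device and completing short
 * packets in software right away.
 */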
1375 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1376 {
1377 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
1378 		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1379 				  &ipv6_hdr(skb)->daddr, skb->csum)) {
1380 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1381 			return 0;
1382 		}
1383 	}
1384 
1385 	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1386 					      &ipv6_hdr(skb)->saddr,
1387 					      &ipv6_hdr(skb)->daddr, 0));
1388 
1389 	if (skb->len <= 76) {
1390 		return __skb_checksum_complete(skb);
1391 	}
1392 	return 0;
1393 }
1394 
1395 /* The socket must have its spinlock held when we get
1396  * here.
1397  *
1398  * We have a potential double-lock case here, so even when
1399  * doing backlog processing we use the BH locking scheme.
1400  * This is because we cannot sleep with the original spinlock
1401  * held.
1402  */
1403 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1404 {
1405 	struct ipv6_pinfo *np = inet6_sk(sk);
1406 	struct tcp_sock *tp;
1407 	struct sk_buff *opt_skb = NULL;
1408 
1409 	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
1410 	   goes to the IPv4 receive handler and is backlogged.
1411 	   From the backlog it always ends up here. Kerboom...
1412 	   Fortunately, tcp_rcv_established and rcv_established
1413 	   handle them correctly, but that is not the case with
1414 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1415 	 */
1416 
1417 	if (skb->protocol == htons(ETH_P_IP))
1418 		return tcp_v4_do_rcv(sk, skb);
1419 
1420 #ifdef CONFIG_TCP_MD5SIG
1421 	if (tcp_v6_inbound_md5_hash(sk, skb))
1422 		goto discard;
1423 #endif
1424 
1425 	if (sk_filter(sk, skb))
1426 		goto discard;
1427 
1428 	/*
1429 	 *	socket locking is here for SMP purposes as backlog rcv
1430 	 *	is currently called with bh processing disabled.
1431 	 */
1432 
1433 	/* Do Stevens' IPV6_PKTOPTIONS.
1434 
1435 	   Yes, guys, this is the only place in our code where we
1436 	   can make it not affect IPv4.
1437 	   The rest of the code is protocol independent,
1438 	   and I do not like the idea of uglifying IPv4.
1439 
1440 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1441 	   does not look very well thought out. For now we latch
1442 	   the options received in the last packet enqueued
1443 	   by tcp. Feel free to propose a better solution.
1444 					       --ANK (980728)
1445 	 */
1446 	if (np->rxopt.all)
1447 		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1448 
1449 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1450 		sock_rps_save_rxhash(sk, skb);
1451 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1452 			goto reset;
1453 		if (opt_skb)
1454 			goto ipv6_pktoptions;
1455 		return 0;
1456 	}
1457 
1458 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1459 		goto csum_err;
1460 
1461 	if (sk->sk_state == TCP_LISTEN) {
1462 		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1463 		if (!nsk)
1464 			goto discard;
1465 
1466 		/*
1467 		 * Queue it on the new socket if the new socket is active,
1468 		 * otherwise we just short-circuit this and continue with
1469 		 * the new socket.
1470 		 */
1471 		if (nsk != sk) {
1472 			sock_rps_save_rxhash(nsk, skb);
1473 			if (tcp_child_process(sk, nsk, skb))
1474 				goto reset;
1475 			if (opt_skb)
1476 				__kfree_skb(opt_skb);
1477 			return 0;
1478 		}
1479 	} else
1480 		sock_rps_save_rxhash(sk, skb);
1481 
1482 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1483 		goto reset;
1484 	if (opt_skb)
1485 		goto ipv6_pktoptions;
1486 	return 0;
1487 
1488 reset:
1489 	tcp_v6_send_reset(sk, skb);
1490 discard:
1491 	if (opt_skb)
1492 		__kfree_skb(opt_skb);
1493 	kfree_skb(skb);
1494 	return 0;
1495 csum_err:
1496 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1497 	goto discard;
1498 
1499 
1500 ipv6_pktoptions:
1501 	/* You ask, what is this?
1502 
1503 	   1. The skb was enqueued by tcp.
1504 	   2. The skb was added to the tail of the read queue, not out of order.
1505 	   3. The socket is not in a passive state.
1506 	   4. Finally, it really contains options that the user wants to receive.
1507 	 */
1508 	tp = tcp_sk(sk);
1509 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1510 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1511 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1512 			np->mcast_oif = inet6_iif(opt_skb);
1513 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1514 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1515 		if (np->rxopt.bits.rxtclass)
1516 			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
1517 		if (ipv6_opt_accepted(sk, opt_skb)) {
1518 			skb_set_owner_r(opt_skb, sk);
1519 			opt_skb = xchg(&np->pktoptions, opt_skb);
1520 		} else {
1521 			__kfree_skb(opt_skb);
1522 			opt_skb = xchg(&np->pktoptions, NULL);
1523 		}
1524 	}
1525 
1526 	kfree_skb(opt_skb);
1527 	return 0;
1528 }
1529 
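/* Main receive path.  Validates and demultiplexes the segment, then
 * processes it directly, via the prequeue, or through the socket
 * backlog depending on who currently owns the socket.
 */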
1530 static int tcp_v6_rcv(struct sk_buff *skb)
1531 {
1532 	const struct tcphdr *th;
1533 	const struct ipv6hdr *hdr;
1534 	struct sock *sk;
1535 	int ret;
1536 	struct net *net = dev_net(skb->dev);
1537 
1538 	if (skb->pkt_type != PACKET_HOST)
1539 		goto discard_it;
1540 
1541 	/*
1542 	 *	Count it even if it's bad.
1543 	 */
1544 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1545 
1546 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1547 		goto discard_it;
1548 
1549 	th = tcp_hdr(skb);
1550 
1551 	if (th->doff < sizeof(struct tcphdr)/4)
1552 		goto bad_packet;
1553 	if (!pskb_may_pull(skb, th->doff*4))
1554 		goto discard_it;
1555 
1556 	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1557 		goto bad_packet;
1558 
1559 	th = tcp_hdr(skb);
1560 	hdr = ipv6_hdr(skb);
1561 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1562 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1563 				    skb->len - th->doff*4);
1564 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1565 	TCP_SKB_CB(skb)->when = 0;
1566 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1567 	TCP_SKB_CB(skb)->sacked = 0;
1568 
1569 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1570 	if (!sk)
1571 		goto no_tcp_socket;
1572 
1573 process:
1574 	if (sk->sk_state == TCP_TIME_WAIT)
1575 		goto do_time_wait;
1576 
1577 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1578 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1579 		goto discard_and_relse;
1580 	}
1581 
1582 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1583 		goto discard_and_relse;
1584 
1585 	if (sk_filter(sk, skb))
1586 		goto discard_and_relse;
1587 
1588 	skb->dev = NULL;
1589 
1590 	bh_lock_sock_nested(sk);
1591 	ret = 0;
1592 	if (!sock_owned_by_user(sk)) {
1593 #ifdef CONFIG_NET_DMA
1594 		struct tcp_sock *tp = tcp_sk(sk);
1595 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1596 			tp->ucopy.dma_chan = net_dma_find_channel();
1597 		if (tp->ucopy.dma_chan)
1598 			ret = tcp_v6_do_rcv(sk, skb);
1599 		else
1600 #endif
1601 		{
1602 			if (!tcp_prequeue(sk, skb))
1603 				ret = tcp_v6_do_rcv(sk, skb);
1604 		}
1605 	} else if (unlikely(sk_add_backlog(sk, skb,
1606 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1607 		bh_unlock_sock(sk);
1608 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1609 		goto discard_and_relse;
1610 	}
1611 	bh_unlock_sock(sk);
1612 
1613 	sock_put(sk);
1614 	return ret ? -1 : 0;
1615 
1616 no_tcp_socket:
1617 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1618 		goto discard_it;
1619 
1620 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1621 bad_packet:
1622 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1623 	} else {
1624 		tcp_v6_send_reset(NULL, skb);
1625 	}
1626 
1627 discard_it:
1628 
1629 	/*
1630 	 *	Discard frame
1631 	 */
1632 
1633 	kfree_skb(skb);
1634 	return 0;
1635 
1636 discard_and_relse:
1637 	sock_put(sk);
1638 	goto discard_it;
1639 
1640 do_time_wait:
1641 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1642 		inet_twsk_put(inet_twsk(sk));
1643 		goto discard_it;
1644 	}
1645 
1646 	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1647 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1648 		inet_twsk_put(inet_twsk(sk));
1649 		goto discard_it;
1650 	}
1651 
1652 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1653 	case TCP_TW_SYN:
1654 	{
1655 		struct sock *sk2;
1656 
1657 		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1658 					    &ipv6_hdr(skb)->daddr,
1659 					    ntohs(th->dest), inet6_iif(skb));
1660 		if (sk2 != NULL) {
1661 			struct inet_timewait_sock *tw = inet_twsk(sk);
1662 			inet_twsk_deschedule(tw, &tcp_death_row);
1663 			inet_twsk_put(tw);
1664 			sk = sk2;
1665 			goto process;
1666 		}
1667 		/* Fall through to ACK */
1668 	}
1669 	case TCP_TW_ACK:
1670 		tcp_v6_timewait_ack(sk, skb);
1671 		break;
1672 	case TCP_TW_RST:
1673 		goto no_tcp_socket;
1674 	case TCP_TW_SUCCESS:;
1675 	}
1676 	goto discard_it;
1677 }
1678 
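/* Early demux: look up the established socket for an incoming segment
 * before routing, so its cached rx dst can be reused for this packet.
 */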
1679 static void tcp_v6_early_demux(struct sk_buff *skb)
1680 {
1681 	const struct ipv6hdr *hdr;
1682 	const struct tcphdr *th;
1683 	struct sock *sk;
1684 
1685 	if (skb->pkt_type != PACKET_HOST)
1686 		return;
1687 
1688 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1689 		return;
1690 
1691 	hdr = ipv6_hdr(skb);
1692 	th = tcp_hdr(skb);
1693 
1694 	if (th->doff < sizeof(struct tcphdr) / 4)
1695 		return;
1696 
1697 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1698 					&hdr->saddr, th->source,
1699 					&hdr->daddr, ntohs(th->dest),
1700 					inet6_iif(skb));
1701 	if (sk) {
1702 		skb->sk = sk;
1703 		skb->destructor = sock_edemux;
1704 		if (sk->sk_state != TCP_TIME_WAIT) {
1705 			struct dst_entry *dst = sk->sk_rx_dst;
1706 			struct inet_sock *icsk = inet_sk(sk);
1707 			if (dst)
1708 				dst = dst_check(dst, 0);
1709 			if (dst &&
1710 			    icsk->rx_dst_ifindex == inet6_iif(skb))
1711 				skb_dst_set_noref(skb, dst);
1712 		}
1713 	}
1714 }
1715 
1716 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1717 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1718 	.twsk_unique	= tcp_twsk_unique,
1719 	.twsk_destructor = tcp_twsk_destructor,
1720 };
1721 
1722 static const struct inet_connection_sock_af_ops ipv6_specific = {
1723 	.queue_xmit	   = inet6_csk_xmit,
1724 	.send_check	   = tcp_v6_send_check,
1725 	.rebuild_header	   = inet6_sk_rebuild_header,
1726 	.conn_request	   = tcp_v6_conn_request,
1727 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1728 	.net_header_len	   = sizeof(struct ipv6hdr),
1729 	.net_frag_header_len = sizeof(struct frag_hdr),
1730 	.setsockopt	   = ipv6_setsockopt,
1731 	.getsockopt	   = ipv6_getsockopt,
1732 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1733 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1734 	.bind_conflict	   = inet6_csk_bind_conflict,
1735 #ifdef CONFIG_COMPAT
1736 	.compat_setsockopt = compat_ipv6_setsockopt,
1737 	.compat_getsockopt = compat_ipv6_getsockopt,
1738 #endif
1739 };
1740 
1741 #ifdef CONFIG_TCP_MD5SIG
1742 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1743 	.md5_lookup	=	tcp_v6_md5_lookup,
1744 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1745 	.md5_parse	=	tcp_v6_parse_md5_keys,
1746 };
1747 #endif
1748 
1749 /*
1750  *	TCP over IPv4 via INET6 API
1751  */
1752 
1753 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1754 	.queue_xmit	   = ip_queue_xmit,
1755 	.send_check	   = tcp_v4_send_check,
1756 	.rebuild_header	   = inet_sk_rebuild_header,
1757 	.conn_request	   = tcp_v6_conn_request,
1758 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1759 	.net_header_len	   = sizeof(struct iphdr),
1760 	.setsockopt	   = ipv6_setsockopt,
1761 	.getsockopt	   = ipv6_getsockopt,
1762 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1763 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1764 	.bind_conflict	   = inet6_csk_bind_conflict,
1765 #ifdef CONFIG_COMPAT
1766 	.compat_setsockopt = compat_ipv6_setsockopt,
1767 	.compat_getsockopt = compat_ipv6_getsockopt,
1768 #endif
1769 };
1770 
1771 #ifdef CONFIG_TCP_MD5SIG
1772 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1773 	.md5_lookup	=	tcp_v4_md5_lookup,
1774 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1775 	.md5_parse	=	tcp_v6_parse_md5_keys,
1776 };
1777 #endif
1778 
1779 /* NOTE: A lot of things are set to zero explicitly by the call to
1780  *       sk_alloc(), so they need not be done here.
1781  */
1782 static int tcp_v6_init_sock(struct sock *sk)
1783 {
1784 	struct inet_connection_sock *icsk = inet_csk(sk);
1785 
1786 	tcp_init_sock(sk);
1787 
1788 	icsk->icsk_af_ops = &ipv6_specific;
1789 
1790 #ifdef CONFIG_TCP_MD5SIG
1791 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1792 #endif
1793 
1794 	return 0;
1795 }
1796 
1797 static void tcp_v6_destroy_sock(struct sock *sk)
1798 {
1799 	tcp_v4_destroy_sock(sk);
1800 	inet6_destroy_sock(sk);
1801 }
1802 
1803 #ifdef CONFIG_PROC_FS
1804 /* Proc filesystem TCPv6 sock list dumping. */
1805 static void get_openreq6(struct seq_file *seq,
1806 			 const struct sock *sk, struct request_sock *req, int i, int uid)
1807 {
1808 	int ttd = req->expires - jiffies;
1809 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1810 	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1811 
1812 	if (ttd < 0)
1813 		ttd = 0;
1814 
1815 	seq_printf(seq,
1816 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1817 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1818 		   i,
1819 		   src->s6_addr32[0], src->s6_addr32[1],
1820 		   src->s6_addr32[2], src->s6_addr32[3],
1821 		   ntohs(inet_rsk(req)->loc_port),
1822 		   dest->s6_addr32[0], dest->s6_addr32[1],
1823 		   dest->s6_addr32[2], dest->s6_addr32[3],
1824 		   ntohs(inet_rsk(req)->rmt_port),
1825 		   TCP_SYN_RECV,
1826 		   0, 0, /* could print option size, but that is af dependent. */
1827 		   1,   /* timers active (only the expire timer) */
1828 		   jiffies_to_clock_t(ttd),
1829 		   req->retrans,
1830 		   uid,
1831 		   0,  /* non standard timer */
1832 		   0, /* open_requests have no inode */
1833 		   0, req);
1834 }
1835 
1836 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1837 {
1838 	const struct in6_addr *dest, *src;
1839 	__u16 destp, srcp;
1840 	int timer_active;
1841 	unsigned long timer_expires;
1842 	const struct inet_sock *inet = inet_sk(sp);
1843 	const struct tcp_sock *tp = tcp_sk(sp);
1844 	const struct inet_connection_sock *icsk = inet_csk(sp);
1845 	const struct ipv6_pinfo *np = inet6_sk(sp);
1846 
1847 	dest  = &np->daddr;
1848 	src   = &np->rcv_saddr;
1849 	destp = ntohs(inet->inet_dport);
1850 	srcp  = ntohs(inet->inet_sport);
1851 
1852 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1853 		timer_active	= 1;
1854 		timer_expires	= icsk->icsk_timeout;
1855 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1856 		timer_active	= 4;
1857 		timer_expires	= icsk->icsk_timeout;
1858 	} else if (timer_pending(&sp->sk_timer)) {
1859 		timer_active	= 2;
1860 		timer_expires	= sp->sk_timer.expires;
1861 	} else {
1862 		timer_active	= 0;
1863 		timer_expires = jiffies;
1864 	}
1865 
1866 	seq_printf(seq,
1867 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1868 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1869 		   i,
1870 		   src->s6_addr32[0], src->s6_addr32[1],
1871 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1872 		   dest->s6_addr32[0], dest->s6_addr32[1],
1873 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1874 		   sp->sk_state,
1875 		   tp->write_seq - tp->snd_una,
1876 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1877 		   timer_active,
1878 		   jiffies_to_clock_t(timer_expires - jiffies),
1879 		   icsk->icsk_retransmits,
1880 		   sock_i_uid(sp),
1881 		   icsk->icsk_probes_out,
1882 		   sock_i_ino(sp),
1883 		   atomic_read(&sp->sk_refcnt), sp,
1884 		   jiffies_to_clock_t(icsk->icsk_rto),
1885 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
1886 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1887 		   tp->snd_cwnd,
1888 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1889 		   );
1890 }
1891 
1892 static void get_timewait6_sock(struct seq_file *seq,
1893 			       struct inet_timewait_sock *tw, int i)
1894 {
1895 	const struct in6_addr *dest, *src;
1896 	__u16 destp, srcp;
1897 	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1898 	int ttd = tw->tw_ttd - jiffies;
1899 
1900 	if (ttd < 0)
1901 		ttd = 0;
1902 
1903 	dest = &tw6->tw_v6_daddr;
1904 	src  = &tw6->tw_v6_rcv_saddr;
1905 	destp = ntohs(tw->tw_dport);
1906 	srcp  = ntohs(tw->tw_sport);
1907 
1908 	seq_printf(seq,
1909 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1910 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1911 		   i,
1912 		   src->s6_addr32[0], src->s6_addr32[1],
1913 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
1914 		   dest->s6_addr32[0], dest->s6_addr32[1],
1915 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1916 		   tw->tw_substate, 0, 0,
1917 		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1918 		   atomic_read(&tw->tw_refcnt), tw);
1919 }
1920 
1921 static int tcp6_seq_show(struct seq_file *seq, void *v)
1922 {
1923 	struct tcp_iter_state *st;
1924 
1925 	if (v == SEQ_START_TOKEN) {
1926 		seq_puts(seq,
1927 			 "  sl  "
1928 			 "local_address                         "
1929 			 "remote_address                        "
1930 			 "st tx_queue rx_queue tr tm->when retrnsmt"
1931 			 "   uid  timeout inode\n");
1932 		goto out;
1933 	}
1934 	st = seq->private;
1935 
1936 	switch (st->state) {
1937 	case TCP_SEQ_STATE_LISTENING:
1938 	case TCP_SEQ_STATE_ESTABLISHED:
1939 		get_tcp6_sock(seq, v, st->num);
1940 		break;
1941 	case TCP_SEQ_STATE_OPENREQ:
1942 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1943 		break;
1944 	case TCP_SEQ_STATE_TIME_WAIT:
1945 		get_timewait6_sock(seq, v, st->num);
1946 		break;
1947 	}
1948 out:
1949 	return 0;
1950 }
1951 
1952 static const struct file_operations tcp6_afinfo_seq_fops = {
1953 	.owner   = THIS_MODULE,
1954 	.open    = tcp_seq_open,
1955 	.read    = seq_read,
1956 	.llseek  = seq_lseek,
1957 	.release = seq_release_net
1958 };
1959 
1960 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1961 	.name		= "tcp6",
1962 	.family		= AF_INET6,
1963 	.seq_fops	= &tcp6_afinfo_seq_fops,
1964 	.seq_ops	= {
1965 		.show		= tcp6_seq_show,
1966 	},
1967 };
1968 
1969 int __net_init tcp6_proc_init(struct net *net)
1970 {
1971 	return tcp_proc_register(net, &tcp6_seq_afinfo);
1972 }
1973 
1974 void tcp6_proc_exit(struct net *net)
1975 {
1976 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
1977 }
1978 #endif
1979 
1980 struct proto tcpv6_prot = {
1981 	.name			= "TCPv6",
1982 	.owner			= THIS_MODULE,
1983 	.close			= tcp_close,
1984 	.connect		= tcp_v6_connect,
1985 	.disconnect		= tcp_disconnect,
1986 	.accept			= inet_csk_accept,
1987 	.ioctl			= tcp_ioctl,
1988 	.init			= tcp_v6_init_sock,
1989 	.destroy		= tcp_v6_destroy_sock,
1990 	.shutdown		= tcp_shutdown,
1991 	.setsockopt		= tcp_setsockopt,
1992 	.getsockopt		= tcp_getsockopt,
1993 	.recvmsg		= tcp_recvmsg,
1994 	.sendmsg		= tcp_sendmsg,
1995 	.sendpage		= tcp_sendpage,
1996 	.backlog_rcv		= tcp_v6_do_rcv,
1997 	.release_cb		= tcp_release_cb,
1998 	.mtu_reduced		= tcp_v6_mtu_reduced,
1999 	.hash			= tcp_v6_hash,
2000 	.unhash			= inet_unhash,
2001 	.get_port		= inet_csk_get_port,
2002 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2003 	.sockets_allocated	= &tcp_sockets_allocated,
2004 	.memory_allocated	= &tcp_memory_allocated,
2005 	.memory_pressure	= &tcp_memory_pressure,
2006 	.orphan_count		= &tcp_orphan_count,
2007 	.sysctl_wmem		= sysctl_tcp_wmem,
2008 	.sysctl_rmem		= sysctl_tcp_rmem,
2009 	.max_header		= MAX_TCP_HEADER,
2010 	.obj_size		= sizeof(struct tcp6_sock),
2011 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2012 	.twsk_prot		= &tcp6_timewait_sock_ops,
2013 	.rsk_prot		= &tcp6_request_sock_ops,
2014 	.h.hashinfo		= &tcp_hashinfo,
2015 	.no_autobind		= true,
2016 #ifdef CONFIG_COMPAT
2017 	.compat_setsockopt	= compat_tcp_setsockopt,
2018 	.compat_getsockopt	= compat_tcp_getsockopt,
2019 #endif
2020 #ifdef CONFIG_MEMCG_KMEM
2021 	.proto_cgroup		= tcp_proto_cgroup,
2022 #endif
2023 };
2024 
2025 static const struct inet6_protocol tcpv6_protocol = {
2026 	.early_demux	=	tcp_v6_early_demux,
2027 	.handler	=	tcp_v6_rcv,
2028 	.err_handler	=	tcp_v6_err,
2029 	.gso_send_check	=	tcp_v6_gso_send_check,
2030 	.gso_segment	=	tcp_tso_segment,
2031 	.gro_receive	=	tcp6_gro_receive,
2032 	.gro_complete	=	tcp6_gro_complete,
2033 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2034 };
2035 
2036 static struct inet_protosw tcpv6_protosw = {
2037 	.type		=	SOCK_STREAM,
2038 	.protocol	=	IPPROTO_TCP,
2039 	.prot		=	&tcpv6_prot,
2040 	.ops		=	&inet6_stream_ops,
2041 	.no_check	=	0,
2042 	.flags		=	INET_PROTOSW_PERMANENT |
2043 				INET_PROTOSW_ICSK,
2044 };
2045 
2046 static int __net_init tcpv6_net_init(struct net *net)
2047 {
2048 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2049 				    SOCK_RAW, IPPROTO_TCP, net);
2050 }
2051 
2052 static void __net_exit tcpv6_net_exit(struct net *net)
2053 {
2054 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2055 }
2056 
2057 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2058 {
2059 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2060 }
2061 
2062 static struct pernet_operations tcpv6_net_ops = {
2063 	.init	    = tcpv6_net_init,
2064 	.exit	    = tcpv6_net_exit,
2065 	.exit_batch = tcpv6_net_exit_batch,
2066 };
2067 
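/* Register the TCPv6 protocol handler, the socket interface and the
 * per-namespace control sockets; unwind in reverse order on failure.
 */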
2068 int __init tcpv6_init(void)
2069 {
2070 	int ret;
2071 
2072 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2073 	if (ret)
2074 		goto out;
2075 
2076 	/* register inet6 protocol */
2077 	ret = inet6_register_protosw(&tcpv6_protosw);
2078 	if (ret)
2079 		goto out_tcpv6_protocol;
2080 
2081 	ret = register_pernet_subsys(&tcpv6_net_ops);
2082 	if (ret)
2083 		goto out_tcpv6_protosw;
2084 out:
2085 	return ret;
2086 
2087 out_tcpv6_protosw:
2088 	inet6_unregister_protosw(&tcpv6_protosw);
2089 out_tcpv6_protocol:
2090 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2091 	goto out;
2092 }
2093 
2094 void tcpv6_exit(void)
2095 {
2096 	unregister_pernet_subsys(&tcpv6_net_ops);
2097 	inet6_unregister_protosw(&tcpv6_protosw);
2098 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2099 }
2100