/* xref: /linux/net/ipv6/tcp_ipv6.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda) */
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

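/* Cache the packet's dst entry on the socket for the established-state
 * RX fast path. The incoming ifindex and the rt6 cookie are saved as
 * well, so the cached route can be revalidated later.
 */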
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

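/* Generate the initial sequence number and the timestamp offset for a
 * new connection from the address/port 4-tuple, keyed with a secret,
 * so that they are hard for an off-path attacker to predict.
 */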
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}

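/* Active open. Validate the destination, hand v4-mapped destinations
 * off to tcp_v4_connect(), otherwise route the flow, bind a source
 * address and port, and send the SYN.
 */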
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

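/* Deferred PMTU-reduction handler: resync the cached MSS with the new
 * path MTU (stashed in tp->mtu_info by the ICMPv6 handler below) and
 * retransmit whatever no longer fits in a single packet.
 */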
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

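/* ICMPv6 error handler for TCP sockets. Locate the socket the error
 * refers to, then propagate the error, update the path MTU, or follow
 * a redirect, depending on the ICMP type and the socket state.
 */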
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (see tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

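/* Send a SYN-ACK answering @req, routing it first if the caller did not
 * already supply a dst entry.
 */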
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}

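/* Feed the IPv6 pseudo-header (RFC 2460) and the TCP header, with its
 * checksum field zeroed, into the MD5 hash in progress.
 */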
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC 2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

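/* Check the TCP MD5 signature option of an incoming segment against the
 * key (if any) configured for the peer address. Returns true if the
 * segment must be dropped: a signature that is missing, unexpected, or
 * does not match.
 */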
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};

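/* Build and send a stateless reply segment (RST or ACK) for @skb. The
 * per-netns control socket is used for transmission, since the receiving
 * socket may be absent (resets) or not a full socket (timewait and
 * request sockets).
 */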
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when the reply is a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb),
					   tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

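/* ACK a segment received by a TIME-WAIT socket, echoing the timestamp,
 * tclass and flow label that the timewait sock recorded.
 */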
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
			0, 0);
}

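/* On a listener, a non-SYN segment that matches no request may carry a
 * valid syncookie; let cookie_v6_check() try to reconstruct and complete
 * the connection from it.
 */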
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

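/* Create the child socket for a completed handshake. For v4-mapped
 * traffic, delegate to tcp_v4_syn_recv_sock() and switch the child to
 * the mapped (IPv4-backed) operations; otherwise build a native IPv6
 * child, copying addresses, IPv6 options and the MD5 key (if any) from
 * the listener and the request.
 */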
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6, an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch the
	   options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* What is this? We keep the cloned options only if:

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, not out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants
	      to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

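/* Main IPv6 receive path for TCP: validate and checksum the header,
 * look up the owning socket, handle timewait and request sockets, and
 * deliver to tcp_v6_do_rcv() directly or via the socket backlog.
 */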
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

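/* Early demux: try to match an established socket before the full
 * receive path runs, and reuse its cached rx dst to save a route
 * lookup per packet.
 */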
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =  tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}