xref: /linux/net/ipv6/tcp_ipv6.c (revision 0e9b70c1e3623fa110fb6be553e644524228ef60)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	TCP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on:
10  *	linux/net/ipv4/tcp.c
11  *	linux/net/ipv4/tcp_input.c
12  *	linux/net/ipv4/tcp_output.c
13  *
14  *	Fixes:
15  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
16  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
17  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
18  *					a single port at the same time.
19  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
20  */
21 
22 #include <linux/bottom_half.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/jiffies.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/init.h>
34 #include <linux/jhash.h>
35 #include <linux/ipsec.h>
36 #include <linux/times.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39 #include <linux/ipv6.h>
40 #include <linux/icmpv6.h>
41 #include <linux/random.h>
42 #include <linux/indirect_call_wrapper.h>
43 
44 #include <net/tcp.h>
45 #include <net/ndisc.h>
46 #include <net/inet6_hashtables.h>
47 #include <net/inet6_connection_sock.h>
48 #include <net/ipv6.h>
49 #include <net/transp_v6.h>
50 #include <net/addrconf.h>
51 #include <net/ip6_route.h>
52 #include <net/ip6_checksum.h>
53 #include <net/inet_ecn.h>
54 #include <net/protocol.h>
55 #include <net/xfrm.h>
56 #include <net/snmp.h>
57 #include <net/dsfield.h>
58 #include <net/timewait_sock.h>
59 #include <net/inet_common.h>
60 #include <net/secure_seq.h>
61 #include <net/busy_poll.h>
62 
63 #include <linux/proc_fs.h>
64 #include <linux/seq_file.h>
65 
66 #include <crypto/hash.h>
67 #include <linux/scatterlist.h>
68 
69 #include <trace/events/tcp.h>
70 
71 static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72 static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
73 				      struct request_sock *req);
74 
75 INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
76 
77 static const struct inet_connection_sock_af_ops ipv6_mapped;
78 const struct inet_connection_sock_af_ops ipv6_specific;
79 #ifdef CONFIG_TCP_MD5SIG
80 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
82 #else
83 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
84 						   const struct in6_addr *addr,
85 						   int l3index)
86 {
87 	return NULL;
88 }
89 #endif
90 
91 /* Helper returning the inet6 address from a given tcp socket.
92  * It can be used in the TCP stack instead of inet6_sk(sk).
93  * This avoids a dereference and allows compiler optimizations.
94  * It is a specialized version of inet6_sk_generic().
95  */
96 static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
97 {
98 	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
99 
100 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
101 }
102 
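/* Cache the dst entry attached to an incoming skb in the socket, together
 * with its ifindex and cookie, so the input path can reuse it for
 * subsequent packets of this flow.
 */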
103 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104 {
105 	struct dst_entry *dst = skb_dst(skb);
106 
107 	if (dst && dst_hold_safe(dst)) {
108 		const struct rt6_info *rt = (const struct rt6_info *)dst;
109 
110 		rcu_assign_pointer(sk->sk_rx_dst, dst);
111 		sk->sk_rx_dst_ifindex = skb->skb_iif;
112 		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
113 	}
114 }
115 
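/* Derive the initial sequence number for a connection from a keyed hash
 * over the {saddr, daddr, sport, dport} tuple (secure_tcpv6_seq()).
 */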
116 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
117 {
118 	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 				ipv6_hdr(skb)->saddr.s6_addr32,
120 				tcp_hdr(skb)->dest,
121 				tcp_hdr(skb)->source);
122 }
123 
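/* Per-destination timestamp offset, also derived from a keyed hash so
 * that TCP timestamps do not expose a single global clock.
 */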
124 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
125 {
126 	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
127 				   ipv6_hdr(skb)->saddr.s6_addr32);
128 }
129 
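/* Run the BPF cgroup INET6_CONNECT hook before the actual connect(). */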
130 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
131 			      int addr_len)
132 {
133 	/* This check is replicated from tcp_v6_connect() and intended to
134 	 * prevent the BPF program called below from accessing bytes that are
135 	 * out of the bounds specified by the user in addr_len.
136 	 */
137 	if (addr_len < SIN6_LEN_RFC2133)
138 		return -EINVAL;
139 
140 	sock_owned_by_me(sk);
141 
142 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
143 }
144 
145 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
146 			  int addr_len)
147 {
148 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
149 	struct inet_connection_sock *icsk = inet_csk(sk);
150 	struct in6_addr *saddr = NULL, *final_p, final;
151 	struct inet_timewait_death_row *tcp_death_row;
152 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
153 	struct inet_sock *inet = inet_sk(sk);
154 	struct tcp_sock *tp = tcp_sk(sk);
155 	struct net *net = sock_net(sk);
156 	struct ipv6_txoptions *opt;
157 	struct dst_entry *dst;
158 	struct flowi6 fl6;
159 	int addr_type;
160 	int err;
161 
162 	if (addr_len < SIN6_LEN_RFC2133)
163 		return -EINVAL;
164 
165 	if (usin->sin6_family != AF_INET6)
166 		return -EAFNOSUPPORT;
167 
168 	memset(&fl6, 0, sizeof(fl6));
169 
170 	if (np->sndflow) {
171 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
172 		IP6_ECN_flow_init(fl6.flowlabel);
173 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
174 			struct ip6_flowlabel *flowlabel;
175 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
176 			if (IS_ERR(flowlabel))
177 				return -EINVAL;
178 			fl6_sock_release(flowlabel);
179 		}
180 	}
181 
182 	/*
183 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
184 	 */
185 
186 	if (ipv6_addr_any(&usin->sin6_addr)) {
187 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
188 			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
189 					       &usin->sin6_addr);
190 		else
191 			usin->sin6_addr = in6addr_loopback;
192 	}
193 
194 	addr_type = ipv6_addr_type(&usin->sin6_addr);
195 
196 	if (addr_type & IPV6_ADDR_MULTICAST)
197 		return -ENETUNREACH;
198 
199 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
200 		if (addr_len >= sizeof(struct sockaddr_in6) &&
201 		    usin->sin6_scope_id) {
202 			/* If interface is set while binding, indices
203 			 * must coincide.
204 			 */
205 			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
206 				return -EINVAL;
207 
208 			sk->sk_bound_dev_if = usin->sin6_scope_id;
209 		}
210 
211 		/* Connecting to a link-local address requires an interface */
212 		if (!sk->sk_bound_dev_if)
213 			return -EINVAL;
214 	}
215 
216 	if (tp->rx_opt.ts_recent_stamp &&
217 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
218 		tp->rx_opt.ts_recent = 0;
219 		tp->rx_opt.ts_recent_stamp = 0;
220 		WRITE_ONCE(tp->write_seq, 0);
221 	}
222 
223 	sk->sk_v6_daddr = usin->sin6_addr;
224 	np->flow_label = fl6.flowlabel;
225 
226 	/*
227 	 *	TCP over IPv4
228 	 */
229 
230 	if (addr_type & IPV6_ADDR_MAPPED) {
231 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
232 		struct sockaddr_in sin;
233 
234 		if (ipv6_only_sock(sk))
235 			return -ENETUNREACH;
236 
237 		sin.sin_family = AF_INET;
238 		sin.sin_port = usin->sin6_port;
239 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
240 
241 		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
242 		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
243 		if (sk_is_mptcp(sk))
244 			mptcpv6_handle_mapped(sk, true);
245 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
246 #ifdef CONFIG_TCP_MD5SIG
247 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
248 #endif
249 
250 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
251 
252 		if (err) {
253 			icsk->icsk_ext_hdr_len = exthdrlen;
254 			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
255 			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
256 			if (sk_is_mptcp(sk))
257 				mptcpv6_handle_mapped(sk, false);
258 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
259 #ifdef CONFIG_TCP_MD5SIG
260 			tp->af_specific = &tcp_sock_ipv6_specific;
261 #endif
262 			goto failure;
263 		}
264 		np->saddr = sk->sk_v6_rcv_saddr;
265 
266 		return err;
267 	}
268 
269 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
270 		saddr = &sk->sk_v6_rcv_saddr;
271 
272 	fl6.flowi6_proto = IPPROTO_TCP;
273 	fl6.daddr = sk->sk_v6_daddr;
274 	fl6.saddr = saddr ? *saddr : np->saddr;
275 	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
276 	fl6.flowi6_oif = sk->sk_bound_dev_if;
277 	fl6.flowi6_mark = sk->sk_mark;
278 	fl6.fl6_dport = usin->sin6_port;
279 	fl6.fl6_sport = inet->inet_sport;
280 	fl6.flowi6_uid = sk->sk_uid;
281 
282 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
283 	final_p = fl6_update_dst(&fl6, opt, &final);
284 
285 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
286 
287 	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
288 	if (IS_ERR(dst)) {
289 		err = PTR_ERR(dst);
290 		goto failure;
291 	}
292 
293 	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
294 
295 	if (!saddr) {
296 		saddr = &fl6.saddr;
297 
298 		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
299 		if (err)
300 			goto failure;
301 	}
302 
303 	/* set the source address */
304 	np->saddr = *saddr;
305 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
306 
307 	sk->sk_gso_type = SKB_GSO_TCPV6;
308 	ip6_dst_store(sk, dst, NULL, NULL);
309 
310 	icsk->icsk_ext_hdr_len = 0;
311 	if (opt)
312 		icsk->icsk_ext_hdr_len = opt->opt_flen +
313 					 opt->opt_nflen;
314 
315 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
316 
317 	inet->inet_dport = usin->sin6_port;
318 
319 	tcp_set_state(sk, TCP_SYN_SENT);
320 	err = inet6_hash_connect(tcp_death_row, sk);
321 	if (err)
322 		goto late_failure;
323 
324 	sk_set_txhash(sk);
325 
326 	if (likely(!tp->repair)) {
327 		if (!tp->write_seq)
328 			WRITE_ONCE(tp->write_seq,
329 				   secure_tcpv6_seq(np->saddr.s6_addr32,
330 						    sk->sk_v6_daddr.s6_addr32,
331 						    inet->inet_sport,
332 						    inet->inet_dport));
333 		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
334 						   sk->sk_v6_daddr.s6_addr32);
335 	}
336 
337 	if (tcp_fastopen_defer_connect(sk, &err))
338 		return err;
339 	if (err)
340 		goto late_failure;
341 
342 	err = tcp_connect(sk);
343 	if (err)
344 		goto late_failure;
345 
346 	return 0;
347 
348 late_failure:
349 	tcp_set_state(sk, TCP_CLOSE);
350 	inet_bhash2_reset_saddr(sk);
351 failure:
352 	inet->inet_dport = 0;
353 	sk->sk_route_caps = 0;
354 	return err;
355 }
356 
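/* Handle a deferred ICMPV6_PKT_TOOBIG notification: take the new path MTU
 * recorded in tp->mtu_info, update the socket's MSS and retransmit if the
 * new path MTU is smaller than the cached one.
 */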
357 static void tcp_v6_mtu_reduced(struct sock *sk)
358 {
359 	struct dst_entry *dst;
360 	u32 mtu;
361 
362 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
363 		return;
364 
365 	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
366 
367 	/* Drop requests trying to increase our current mss.
368 	 * Check done in __ip6_rt_update_pmtu() is too late.
369 	 */
370 	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
371 		return;
372 
373 	dst = inet6_csk_update_pmtu(sk, mtu);
374 	if (!dst)
375 		return;
376 
377 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
378 		tcp_sync_mss(sk, dst_mtu(dst));
379 		tcp_simple_retransmit(sk);
380 	}
381 }
382 
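/* ICMPv6 error handler for TCP: find the socket the error refers to and
 * propagate it (PMTU updates, redirects, hard and soft errors).
 */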
383 static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
384 		u8 type, u8 code, int offset, __be32 info)
385 {
386 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
387 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
388 	struct net *net = dev_net(skb->dev);
389 	struct request_sock *fastopen;
390 	struct ipv6_pinfo *np;
391 	struct tcp_sock *tp;
392 	__u32 seq, snd_una;
393 	struct sock *sk;
394 	bool fatal;
395 	int err;
396 
397 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
398 					&hdr->daddr, th->dest,
399 					&hdr->saddr, ntohs(th->source),
400 					skb->dev->ifindex, inet6_sdif(skb));
401 
402 	if (!sk) {
403 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
404 				  ICMP6_MIB_INERRORS);
405 		return -ENOENT;
406 	}
407 
408 	if (sk->sk_state == TCP_TIME_WAIT) {
409 		inet_twsk_put(inet_twsk(sk));
410 		return 0;
411 	}
412 	seq = ntohl(th->seq);
413 	fatal = icmpv6_err_convert(type, code, &err);
414 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
415 		tcp_req_err(sk, seq, fatal);
416 		return 0;
417 	}
418 
419 	bh_lock_sock(sk);
420 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
421 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
422 
423 	if (sk->sk_state == TCP_CLOSE)
424 		goto out;
425 
426 	if (static_branch_unlikely(&ip6_min_hopcount)) {
427 		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
428 		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
429 			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
430 			goto out;
431 		}
432 	}
433 
434 	tp = tcp_sk(sk);
435 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
436 	fastopen = rcu_dereference(tp->fastopen_rsk);
437 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
438 	if (sk->sk_state != TCP_LISTEN &&
439 	    !between(seq, snd_una, tp->snd_nxt)) {
440 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
441 		goto out;
442 	}
443 
444 	np = tcp_inet6_sk(sk);
445 
446 	if (type == NDISC_REDIRECT) {
447 		if (!sock_owned_by_user(sk)) {
448 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
449 
450 			if (dst)
451 				dst->ops->redirect(dst, sk, skb);
452 		}
453 		goto out;
454 	}
455 
456 	if (type == ICMPV6_PKT_TOOBIG) {
457 		u32 mtu = ntohl(info);
458 
459 		/* We are not interested in TCP_LISTEN and open_requests
460 		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
461 		 * they should go through unfragmented).
462 		 */
463 		if (sk->sk_state == TCP_LISTEN)
464 			goto out;
465 
466 		if (!ip6_sk_accept_pmtu(sk))
467 			goto out;
468 
469 		if (mtu < IPV6_MIN_MTU)
470 			goto out;
471 
472 		WRITE_ONCE(tp->mtu_info, mtu);
473 
474 		if (!sock_owned_by_user(sk))
475 			tcp_v6_mtu_reduced(sk);
476 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
477 					   &sk->sk_tsq_flags))
478 			sock_hold(sk);
479 		goto out;
480 	}
481 
482 
483 	/* Might be for a request_sock */
484 	switch (sk->sk_state) {
485 	case TCP_SYN_SENT:
486 	case TCP_SYN_RECV:
487 		/* Only in fast or simultaneous open. If a fast open socket is
488 		 * already accepted it is treated as a connected one below.
489 		 */
490 		if (fastopen && !fastopen->sk)
491 			break;
492 
493 		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
494 
495 		if (!sock_owned_by_user(sk)) {
496 			sk->sk_err = err;
497 			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
498 
499 			tcp_done(sk);
500 		} else
501 			sk->sk_err_soft = err;
502 		goto out;
503 	case TCP_LISTEN:
504 		break;
505 	default:
506 		/* Check whether this ICMP message allows reverting the RTO backoff.
507 		 * (see RFC 6069)
508 		 */
509 		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
510 		    code == ICMPV6_NOROUTE)
511 			tcp_ld_RTO_revert(sk, seq);
512 	}
513 
514 	if (!sock_owned_by_user(sk) && np->recverr) {
515 		sk->sk_err = err;
516 		sk_error_report(sk);
517 	} else
518 		sk->sk_err_soft = err;
519 
520 out:
521 	bh_unlock_sock(sk);
522 	sock_put(sk);
523 	return 0;
524 }
525 
526 
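/* Build and transmit a SYN-ACK for a pending connection request,
 * looking up a route first if the caller did not provide one.
 */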
527 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
528 			      struct flowi *fl,
529 			      struct request_sock *req,
530 			      struct tcp_fastopen_cookie *foc,
531 			      enum tcp_synack_type synack_type,
532 			      struct sk_buff *syn_skb)
533 {
534 	struct inet_request_sock *ireq = inet_rsk(req);
535 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
536 	struct ipv6_txoptions *opt;
537 	struct flowi6 *fl6 = &fl->u.ip6;
538 	struct sk_buff *skb;
539 	int err = -ENOMEM;
540 	u8 tclass;
541 
542 	/* First, grab a route. */
543 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
544 					       IPPROTO_TCP)) == NULL)
545 		goto done;
546 
547 	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
548 
549 	if (skb) {
550 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
551 				    &ireq->ir_v6_rmt_addr);
552 
553 		fl6->daddr = ireq->ir_v6_rmt_addr;
554 		if (np->repflow && ireq->pktopts)
555 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
556 
557 		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
558 				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
559 				(np->tclass & INET_ECN_MASK) :
560 				np->tclass;
561 
562 		if (!INET_ECN_is_capable(tclass) &&
563 		    tcp_bpf_ca_needs_ecn((struct sock *)req))
564 			tclass |= INET_ECN_ECT_0;
565 
566 		rcu_read_lock();
567 		opt = ireq->ipv6_opt;
568 		if (!opt)
569 			opt = rcu_dereference(np->opt);
570 		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
571 			       tclass, sk->sk_priority);
572 		rcu_read_unlock();
573 		err = net_xmit_eval(err);
574 	}
575 
576 done:
577 	return err;
578 }
579 
580 
581 static void tcp_v6_reqsk_destructor(struct request_sock *req)
582 {
583 	kfree(inet_rsk(req)->ipv6_opt);
584 	consume_skb(inet_rsk(req)->pktopts);
585 }
586 
587 #ifdef CONFIG_TCP_MD5SIG
588 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
589 						   const struct in6_addr *addr,
590 						   int l3index)
591 {
592 	return tcp_md5_do_lookup(sk, l3index,
593 				 (union tcp_md5_addr *)addr, AF_INET6);
594 }
595 
596 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
597 						const struct sock *addr_sk)
598 {
599 	int l3index;
600 
601 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
602 						 addr_sk->sk_bound_dev_if);
603 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
604 				    l3index);
605 }
606 
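/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler: validate the request
 * and add or delete an MD5 signature key for the given peer address.
 */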
607 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
608 				 sockptr_t optval, int optlen)
609 {
610 	struct tcp_md5sig cmd;
611 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
612 	int l3index = 0;
613 	u8 prefixlen;
614 	u8 flags;
615 
616 	if (optlen < sizeof(cmd))
617 		return -EINVAL;
618 
619 	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
620 		return -EFAULT;
621 
622 	if (sin6->sin6_family != AF_INET6)
623 		return -EINVAL;
624 
625 	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
626 
627 	if (optname == TCP_MD5SIG_EXT &&
628 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
629 		prefixlen = cmd.tcpm_prefixlen;
630 		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
631 					prefixlen > 32))
632 			return -EINVAL;
633 	} else {
634 		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
635 	}
636 
637 	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
638 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
639 		struct net_device *dev;
640 
641 		rcu_read_lock();
642 		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
643 		if (dev && netif_is_l3_master(dev))
644 			l3index = dev->ifindex;
645 		rcu_read_unlock();
646 
647 		/* It is ok to check whether dev/l3index were set outside of rcu;
648 		 * right now the device MUST be an L3 master.
649 		 */
650 		if (!dev || !l3index)
651 			return -EINVAL;
652 	}
653 
654 	if (!cmd.tcpm_keylen) {
655 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
656 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
657 					      AF_INET, prefixlen,
658 					      l3index, flags);
659 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
660 				      AF_INET6, prefixlen, l3index, flags);
661 	}
662 
663 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
664 		return -EINVAL;
665 
666 	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
667 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
668 				      AF_INET, prefixlen, l3index, flags,
669 				      cmd.tcpm_key, cmd.tcpm_keylen);
670 
671 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
672 			      AF_INET6, prefixlen, l3index, flags,
673 			      cmd.tcpm_key, cmd.tcpm_keylen);
674 }
675 
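/* Feed the IPv6 pseudo-header and the TCP header (with its checksum
 * zeroed) into the pending MD5 hash request.
 */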
676 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
677 				   const struct in6_addr *daddr,
678 				   const struct in6_addr *saddr,
679 				   const struct tcphdr *th, int nbytes)
680 {
681 	struct tcp6_pseudohdr *bp;
682 	struct scatterlist sg;
683 	struct tcphdr *_th;
684 
685 	bp = hp->scratch;
686 	/* 1. TCP pseudo-header (RFC2460) */
687 	bp->saddr = *saddr;
688 	bp->daddr = *daddr;
689 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
690 	bp->len = cpu_to_be32(nbytes);
691 
692 	_th = (struct tcphdr *)(bp + 1);
693 	memcpy(_th, th, sizeof(*th));
694 	_th->check = 0;
695 
696 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
697 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
698 				sizeof(*bp) + sizeof(*th));
699 	return crypto_ahash_update(hp->md5_req);
700 }
701 
702 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
703 			       const struct in6_addr *daddr, struct in6_addr *saddr,
704 			       const struct tcphdr *th)
705 {
706 	struct tcp_md5sig_pool *hp;
707 	struct ahash_request *req;
708 
709 	hp = tcp_get_md5sig_pool();
710 	if (!hp)
711 		goto clear_hash_noput;
712 	req = hp->md5_req;
713 
714 	if (crypto_ahash_init(req))
715 		goto clear_hash;
716 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
717 		goto clear_hash;
718 	if (tcp_md5_hash_key(hp, key))
719 		goto clear_hash;
720 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
721 	if (crypto_ahash_final(req))
722 		goto clear_hash;
723 
724 	tcp_put_md5sig_pool();
725 	return 0;
726 
727 clear_hash:
728 	tcp_put_md5sig_pool();
729 clear_hash_noput:
730 	memset(md5_hash, 0, 16);
731 	return 1;
732 }
733 
734 static int tcp_v6_md5_hash_skb(char *md5_hash,
735 			       const struct tcp_md5sig_key *key,
736 			       const struct sock *sk,
737 			       const struct sk_buff *skb)
738 {
739 	const struct in6_addr *saddr, *daddr;
740 	struct tcp_md5sig_pool *hp;
741 	struct ahash_request *req;
742 	const struct tcphdr *th = tcp_hdr(skb);
743 
744 	if (sk) { /* valid for establish/request sockets */
745 		saddr = &sk->sk_v6_rcv_saddr;
746 		daddr = &sk->sk_v6_daddr;
747 	} else {
748 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
749 		saddr = &ip6h->saddr;
750 		daddr = &ip6h->daddr;
751 	}
752 
753 	hp = tcp_get_md5sig_pool();
754 	if (!hp)
755 		goto clear_hash_noput;
756 	req = hp->md5_req;
757 
758 	if (crypto_ahash_init(req))
759 		goto clear_hash;
760 
761 	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
762 		goto clear_hash;
763 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
764 		goto clear_hash;
765 	if (tcp_md5_hash_key(hp, key))
766 		goto clear_hash;
767 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
768 	if (crypto_ahash_final(req))
769 		goto clear_hash;
770 
771 	tcp_put_md5sig_pool();
772 	return 0;
773 
774 clear_hash:
775 	tcp_put_md5sig_pool();
776 clear_hash_noput:
777 	memset(md5_hash, 0, 16);
778 	return 1;
779 }
780 
781 #endif
782 
783 static void tcp_v6_init_req(struct request_sock *req,
784 			    const struct sock *sk_listener,
785 			    struct sk_buff *skb)
786 {
787 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
788 	struct inet_request_sock *ireq = inet_rsk(req);
789 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
790 
791 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
792 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
793 
794 	/* So that link-local addresses have meaning */
795 	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
796 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
797 		ireq->ir_iif = tcp_v6_iif(skb);
798 
799 	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
800 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
801 	     np->rxopt.bits.rxinfo ||
802 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
803 	     np->rxopt.bits.rxohlim || np->repflow)) {
804 		refcount_inc(&skb->users);
805 		ireq->pktopts = skb;
806 	}
807 }
808 
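/* Initialise the IPv6 fields of a request sock, run the LSM hook and
 * look up a route for the SYN-ACK.
 */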
809 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
810 					  struct sk_buff *skb,
811 					  struct flowi *fl,
812 					  struct request_sock *req)
813 {
814 	tcp_v6_init_req(req, sk, skb);
815 
816 	if (security_inet_conn_request(sk, skb, req))
817 		return NULL;
818 
819 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
820 }
821 
822 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
823 	.family		=	AF_INET6,
824 	.obj_size	=	sizeof(struct tcp6_request_sock),
825 	.rtx_syn_ack	=	tcp_rtx_synack,
826 	.send_ack	=	tcp_v6_reqsk_send_ack,
827 	.destructor	=	tcp_v6_reqsk_destructor,
828 	.send_reset	=	tcp_v6_send_reset,
829 	.syn_ack_timeout =	tcp_syn_ack_timeout,
830 };
831 
832 const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
833 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
834 				sizeof(struct ipv6hdr),
835 #ifdef CONFIG_TCP_MD5SIG
836 	.req_md5_lookup	=	tcp_v6_md5_lookup,
837 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
838 #endif
839 #ifdef CONFIG_SYN_COOKIES
840 	.cookie_init_seq =	cookie_v6_init_sequence,
841 #endif
842 	.route_req	=	tcp_v6_route_req,
843 	.init_seq	=	tcp_v6_init_seq,
844 	.init_ts_off	=	tcp_v6_init_ts_off,
845 	.send_synack	=	tcp_v6_send_synack,
846 };
847 
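/* Build and send a bare ACK or RST on the per-netns control socket.
 * Used for resets, TIME_WAIT ACKs and request sock ACKs.
 */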
848 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
849 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
850 				 int oif, struct tcp_md5sig_key *key, int rst,
851 				 u8 tclass, __be32 label, u32 priority, u32 txhash)
852 {
853 	const struct tcphdr *th = tcp_hdr(skb);
854 	struct tcphdr *t1;
855 	struct sk_buff *buff;
856 	struct flowi6 fl6;
857 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
858 	struct sock *ctl_sk = net->ipv6.tcp_sk;
859 	unsigned int tot_len = sizeof(struct tcphdr);
860 	__be32 mrst = 0, *topt;
861 	struct dst_entry *dst;
862 	__u32 mark = 0;
863 
864 	if (tsecr)
865 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
866 #ifdef CONFIG_TCP_MD5SIG
867 	if (key)
868 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
869 #endif
870 
871 #ifdef CONFIG_MPTCP
872 	if (rst && !key) {
873 		mrst = mptcp_reset_option(skb);
874 
875 		if (mrst)
876 			tot_len += sizeof(__be32);
877 	}
878 #endif
879 
880 	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
881 	if (!buff)
882 		return;
883 
884 	skb_reserve(buff, MAX_TCP_HEADER);
885 
886 	t1 = skb_push(buff, tot_len);
887 	skb_reset_transport_header(buff);
888 
889 	/* Swap the send and the receive. */
890 	memset(t1, 0, sizeof(*t1));
891 	t1->dest = th->source;
892 	t1->source = th->dest;
893 	t1->doff = tot_len / 4;
894 	t1->seq = htonl(seq);
895 	t1->ack_seq = htonl(ack);
896 	t1->ack = !rst || !th->ack;
897 	t1->rst = rst;
898 	t1->window = htons(win);
899 
900 	topt = (__be32 *)(t1 + 1);
901 
902 	if (tsecr) {
903 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
904 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
905 		*topt++ = htonl(tsval);
906 		*topt++ = htonl(tsecr);
907 	}
908 
909 	if (mrst)
910 		*topt++ = mrst;
911 
912 #ifdef CONFIG_TCP_MD5SIG
913 	if (key) {
914 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
915 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
916 		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
917 				    &ipv6_hdr(skb)->saddr,
918 				    &ipv6_hdr(skb)->daddr, t1);
919 	}
920 #endif
921 
922 	memset(&fl6, 0, sizeof(fl6));
923 	fl6.daddr = ipv6_hdr(skb)->saddr;
924 	fl6.saddr = ipv6_hdr(skb)->daddr;
925 	fl6.flowlabel = label;
926 
927 	buff->ip_summed = CHECKSUM_PARTIAL;
928 
929 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
930 
931 	fl6.flowi6_proto = IPPROTO_TCP;
932 	if (rt6_need_strict(&fl6.daddr) && !oif)
933 		fl6.flowi6_oif = tcp_v6_iif(skb);
934 	else {
935 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
936 			oif = skb->skb_iif;
937 
938 		fl6.flowi6_oif = oif;
939 	}
940 
941 	if (sk) {
942 		if (sk->sk_state == TCP_TIME_WAIT)
943 			mark = inet_twsk(sk)->tw_mark;
944 		else
945 			mark = sk->sk_mark;
946 		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
947 	}
948 	if (txhash) {
949 		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
950 		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
951 	}
952 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
953 	fl6.fl6_dport = t1->dest;
954 	fl6.fl6_sport = t1->source;
955 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
956 	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
957 
958 	/* Pass a socket to ip6_dst_lookup_flow whether or not it is for an RST.
959 	 * The underlying function will use it to retrieve the network
960 	 * namespace.
961 	 */
962 	if (sk && sk->sk_state != TCP_TIME_WAIT)
963 		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
964 	else
965 		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
966 	if (!IS_ERR(dst)) {
967 		skb_dst_set(buff, dst);
968 		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
969 			 tclass & ~INET_ECN_MASK, priority);
970 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
971 		if (rst)
972 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
973 		return;
974 	}
975 
976 	kfree_skb(buff);
977 }
978 
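/* Send a RST in response to skb; sequence numbers, flow label, priority
 * and the MD5 key (if any) are derived from the offending segment and
 * the socket, when one exists.
 */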
979 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
980 {
981 	const struct tcphdr *th = tcp_hdr(skb);
982 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
983 	u32 seq = 0, ack_seq = 0;
984 	struct tcp_md5sig_key *key = NULL;
985 #ifdef CONFIG_TCP_MD5SIG
986 	const __u8 *hash_location = NULL;
987 	unsigned char newhash[16];
988 	int genhash;
989 	struct sock *sk1 = NULL;
990 #endif
991 	__be32 label = 0;
992 	u32 priority = 0;
993 	struct net *net;
994 	u32 txhash = 0;
995 	int oif = 0;
996 
997 	if (th->rst)
998 		return;
999 
1000 	/* If sk is not NULL, it means we did a successful lookup and the
1001 	 * incoming route had to be correct. prequeue might have dropped our dst.
1002 	 */
1003 	if (!sk && !ipv6_unicast_destination(skb))
1004 		return;
1005 
1006 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1007 #ifdef CONFIG_TCP_MD5SIG
1008 	rcu_read_lock();
1009 	hash_location = tcp_parse_md5sig_option(th);
1010 	if (sk && sk_fullsock(sk)) {
1011 		int l3index;
1012 
1013 		/* If sdif is set, the packet ingressed via a device
1014 		 * in an L3 domain and inet_iif is set to it.
1015 		 */
1016 		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1017 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1018 	} else if (hash_location) {
1019 		int dif = tcp_v6_iif_l3_slave(skb);
1020 		int sdif = tcp_v6_sdif(skb);
1021 		int l3index;
1022 
1023 		/*
1024 		 * The active side is lost. Try to find the listening socket through
1025 		 * the source port, and then find the md5 key through the listening socket.
1026 		 * We do not lose security here:
1027 		 * the incoming packet is checked against the md5 hash of the found key,
1028 		 * and no RST is generated if the md5 hash doesn't match.
1029 		 */
1030 		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1031 					    NULL, 0, &ipv6h->saddr, th->source,
1032 					    &ipv6h->daddr, ntohs(th->source),
1033 					    dif, sdif);
1034 		if (!sk1)
1035 			goto out;
1036 
1037 		/* If sdif is set, the packet ingressed via a device
1038 		 * in an L3 domain and dif is set to it.
1039 		 */
1040 		l3index = tcp_v6_sdif(skb) ? dif : 0;
1041 
1042 		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1043 		if (!key)
1044 			goto out;
1045 
1046 		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
1047 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
1048 			goto out;
1049 	}
1050 #endif
1051 
1052 	if (th->ack)
1053 		seq = ntohl(th->ack_seq);
1054 	else
1055 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1056 			  (th->doff << 2);
1057 
1058 	if (sk) {
1059 		oif = sk->sk_bound_dev_if;
1060 		if (sk_fullsock(sk)) {
1061 			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1062 
1063 			trace_tcp_send_reset(sk, skb);
1064 			if (np->repflow)
1065 				label = ip6_flowlabel(ipv6h);
1066 			priority = sk->sk_priority;
1067 			txhash = sk->sk_hash;
1068 		}
1069 		if (sk->sk_state == TCP_TIME_WAIT) {
1070 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1071 			priority = inet_twsk(sk)->tw_priority;
1072 			txhash = inet_twsk(sk)->tw_txhash;
1073 		}
1074 	} else {
1075 		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1076 			label = ip6_flowlabel(ipv6h);
1077 	}
1078 
1079 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
1080 			     ipv6_get_dsfield(ipv6h), label, priority, txhash);
1081 
1082 #ifdef CONFIG_TCP_MD5SIG
1083 out:
1084 	rcu_read_unlock();
1085 #endif
1086 }
1087 
1088 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1089 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1090 			    struct tcp_md5sig_key *key, u8 tclass,
1091 			    __be32 label, u32 priority, u32 txhash)
1092 {
1093 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
1094 			     tclass, label, priority, txhash);
1095 }
1096 
1097 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1098 {
1099 	struct inet_timewait_sock *tw = inet_twsk(sk);
1100 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1101 
1102 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1103 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1104 			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
1105 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1106 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
1107 			tw->tw_txhash);
1108 
1109 	inet_twsk_put(tw);
1110 }
1111 
1112 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1113 				  struct request_sock *req)
1114 {
1115 	int l3index;
1116 
1117 	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1118 
1119 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1120 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1121 	 */
1122 	/* RFC 7323 2.3
1123 	 * The window field (SEG.WND) of every outgoing segment, with the
1124 	 * exception of <SYN> segments, MUST be right-shifted by
1125 	 * Rcv.Wind.Shift bits:
1126 	 */
1127 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1128 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1129 			tcp_rsk(req)->rcv_nxt,
1130 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1131 			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1132 			req->ts_recent, sk->sk_bound_dev_if,
1133 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
1134 			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
1135 			tcp_rsk(req)->txhash);
1136 }
1137 
1138 
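/* For a non-SYN segment received on a listener, try to validate a SYN
 * cookie and recreate the corresponding child socket.
 */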
1139 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1140 {
1141 #ifdef CONFIG_SYN_COOKIES
1142 	const struct tcphdr *th = tcp_hdr(skb);
1143 
1144 	if (!th->syn)
1145 		sk = cookie_v6_check(sk, skb);
1146 #endif
1147 	return sk;
1148 }
1149 
1150 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1151 			 struct tcphdr *th, u32 *cookie)
1152 {
1153 	u16 mss = 0;
1154 #ifdef CONFIG_SYN_COOKIES
1155 	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1156 				    &tcp_request_sock_ipv6_ops, sk, th);
1157 	if (mss) {
1158 		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
1159 		tcp_synq_overflow(sk);
1160 	}
1161 #endif
1162 	return mss;
1163 }
1164 
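/* Handle an incoming connection request (SYN) on a listening socket. */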
1165 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1166 {
1167 	if (skb->protocol == htons(ETH_P_IP))
1168 		return tcp_v4_conn_request(sk, skb);
1169 
1170 	if (!ipv6_unicast_destination(skb))
1171 		goto drop;
1172 
1173 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1174 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1175 		return 0;
1176 	}
1177 
1178 	return tcp_conn_request(&tcp6_request_sock_ops,
1179 				&tcp_request_sock_ipv6_ops, sk, skb);
1180 
1181 drop:
1182 	tcp_listendrop(sk);
1183 	return 0; /* don't send reset */
1184 }
1185 
1186 static void tcp_v6_restore_cb(struct sk_buff *skb)
1187 {
1188 	/* We need to move header back to the beginning if xfrm6_policy_check()
1189 	 * and tcp_v6_fill_cb() are going to be called again.
1190 	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1191 	 */
1192 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1193 		sizeof(struct inet6_skb_parm));
1194 }
1195 
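/* Create the child socket once the 3-way handshake is complete, for both
 * native IPv6 and v4-mapped connections.
 */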
1196 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1197 					 struct request_sock *req,
1198 					 struct dst_entry *dst,
1199 					 struct request_sock *req_unhash,
1200 					 bool *own_req)
1201 {
1202 	struct inet_request_sock *ireq;
1203 	struct ipv6_pinfo *newnp;
1204 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1205 	struct ipv6_txoptions *opt;
1206 	struct inet_sock *newinet;
1207 	bool found_dup_sk = false;
1208 	struct tcp_sock *newtp;
1209 	struct sock *newsk;
1210 #ifdef CONFIG_TCP_MD5SIG
1211 	struct tcp_md5sig_key *key;
1212 	int l3index;
1213 #endif
1214 	struct flowi6 fl6;
1215 
1216 	if (skb->protocol == htons(ETH_P_IP)) {
1217 		/*
1218 		 *	v6 mapped
1219 		 */
1220 
1221 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1222 					     req_unhash, own_req);
1223 
1224 		if (!newsk)
1225 			return NULL;
1226 
1227 		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1228 
1229 		newnp = tcp_inet6_sk(newsk);
1230 		newtp = tcp_sk(newsk);
1231 
1232 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1233 
1234 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1235 
1236 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1237 		if (sk_is_mptcp(newsk))
1238 			mptcpv6_handle_mapped(newsk, true);
1239 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1240 #ifdef CONFIG_TCP_MD5SIG
1241 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1242 #endif
1243 
1244 		newnp->ipv6_mc_list = NULL;
1245 		newnp->ipv6_ac_list = NULL;
1246 		newnp->ipv6_fl_list = NULL;
1247 		newnp->pktoptions  = NULL;
1248 		newnp->opt	   = NULL;
1249 		newnp->mcast_oif   = inet_iif(skb);
1250 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
1251 		newnp->rcv_flowinfo = 0;
1252 		if (np->repflow)
1253 			newnp->flow_label = 0;
1254 
1255 		/*
1256 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1257 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1258 		 * that function for the gory details. -acme
1259 		 */
1260 
1261 		/* This is a tricky place. Until this moment the IPv4 tcp code
1262 		   worked with IPv6 icsk.icsk_af_ops.
1263 		   Sync it now.
1264 		 */
1265 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1266 
1267 		return newsk;
1268 	}
1269 
1270 	ireq = inet_rsk(req);
1271 
1272 	if (sk_acceptq_is_full(sk))
1273 		goto out_overflow;
1274 
1275 	if (!dst) {
1276 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1277 		if (!dst)
1278 			goto out;
1279 	}
1280 
1281 	newsk = tcp_create_openreq_child(sk, req, skb);
1282 	if (!newsk)
1283 		goto out_nonewsk;
1284 
1285 	/*
1286 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1287 	 * count here, tcp_create_openreq_child now does this for us, see the
1288 	 * comment in that function for the gory details. -acme
1289 	 */
1290 
1291 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1292 	ip6_dst_store(newsk, dst, NULL, NULL);
1293 	inet6_sk_rx_dst_set(newsk, skb);
1294 
1295 	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1296 
1297 	newtp = tcp_sk(newsk);
1298 	newinet = inet_sk(newsk);
1299 	newnp = tcp_inet6_sk(newsk);
1300 
1301 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1302 
1303 	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1304 	newnp->saddr = ireq->ir_v6_loc_addr;
1305 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1306 	newsk->sk_bound_dev_if = ireq->ir_iif;
1307 
1308 	/* Now IPv6 options...
1309 
1310 	   First: no IPv4 options.
1311 	 */
1312 	newinet->inet_opt = NULL;
1313 	newnp->ipv6_mc_list = NULL;
1314 	newnp->ipv6_ac_list = NULL;
1315 	newnp->ipv6_fl_list = NULL;
1316 
1317 	/* Clone RX bits */
1318 	newnp->rxopt.all = np->rxopt.all;
1319 
1320 	newnp->pktoptions = NULL;
1321 	newnp->opt	  = NULL;
1322 	newnp->mcast_oif  = tcp_v6_iif(skb);
1323 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1324 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1325 	if (np->repflow)
1326 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1327 
1328 	/* Set ToS of the new socket based upon the value of incoming SYN.
1329 	 * ECT bits are set later in tcp_init_transfer().
1330 	 */
1331 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1332 		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1333 
1334 	/* Clone native IPv6 options from listening socket (if any)
1335 
1336 	   Yes, keeping reference count would be much more clever,
1337 	   but we do one more thing here: reattach optmem
1338 	   to newsk.
1339 	 */
1340 	opt = ireq->ipv6_opt;
1341 	if (!opt)
1342 		opt = rcu_dereference(np->opt);
1343 	if (opt) {
1344 		opt = ipv6_dup_options(newsk, opt);
1345 		RCU_INIT_POINTER(newnp->opt, opt);
1346 	}
1347 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1348 	if (opt)
1349 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1350 						    opt->opt_flen;
1351 
1352 	tcp_ca_openreq_child(newsk, dst);
1353 
1354 	tcp_sync_mss(newsk, dst_mtu(dst));
1355 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1356 
1357 	tcp_initialize_rcv_mss(newsk);
1358 
1359 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1360 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1361 
1362 #ifdef CONFIG_TCP_MD5SIG
1363 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1364 
1365 	/* Copy over the MD5 key from the original socket */
1366 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1367 	if (key) {
1368 		const union tcp_md5_addr *addr;
1369 
1370 		addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
1371 		if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
1372 			inet_csk_prepare_forced_close(newsk);
1373 			tcp_done(newsk);
1374 			goto out;
1375 		}
1376 	}
1377 #endif
1378 
1379 	if (__inet_inherit_port(sk, newsk) < 0) {
1380 		inet_csk_prepare_forced_close(newsk);
1381 		tcp_done(newsk);
1382 		goto out;
1383 	}
1384 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1385 				       &found_dup_sk);
1386 	if (*own_req) {
1387 		tcp_move_syn(newtp, req);
1388 
1389 		/* Clone pktoptions received with SYN, if we own the req */
1390 		if (ireq->pktopts) {
1391 			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
1392 			consume_skb(ireq->pktopts);
1393 			ireq->pktopts = NULL;
1394 			if (newnp->pktoptions)
1395 				tcp_v6_restore_cb(newnp->pktoptions);
1396 		}
1397 	} else {
1398 		if (!req_unhash && found_dup_sk) {
1399 			/* This code path should only be executed in the
1400 			 * syncookie case.
1401 			 */
1402 			bh_unlock_sock(newsk);
1403 			sock_put(newsk);
1404 			newsk = NULL;
1405 		}
1406 	}
1407 
1408 	return newsk;
1409 
1410 out_overflow:
1411 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1412 out_nonewsk:
1413 	dst_release(dst);
1414 out:
1415 	tcp_listendrop(sk);
1416 	return NULL;
1417 }
1418 
1419 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1420 							   u32));
1421 /* The socket must have its spinlock held when we get
1422  * here, unless it is a TCP_LISTEN socket.
1423  *
1424  * We have a potential double-lock case here, so even when
1425  * doing backlog processing we use the BH locking scheme.
1426  * This is because we cannot sleep with the original spinlock
1427  * held.
1428  */
1429 INDIRECT_CALLABLE_SCOPE
1430 int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1431 {
1432 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1433 	struct sk_buff *opt_skb = NULL;
1434 	enum skb_drop_reason reason;
1435 	struct tcp_sock *tp;
1436 
1437 	/* Imagine: socket is IPv6. IPv4 packet arrives,
1438 	   goes to the IPv4 receive handler and is backlogged.
1439 	   From the backlog it always goes here. Kerboom...
1440 	   Fortunately, tcp_rcv_established and rcv_established
1441 	   handle them correctly, but that is not the case with
1442 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1443 	 */
1444 
1445 	if (skb->protocol == htons(ETH_P_IP))
1446 		return tcp_v4_do_rcv(sk, skb);
1447 
1448 	/*
1449 	 *	socket locking is here for SMP purposes as backlog rcv
1450 	 *	is currently called with bh processing disabled.
1451 	 */
1452 
1453 	/* Do Stevens' IPV6_PKTOPTIONS.
1454 
1455 	   Yes, guys, it is the only place in our code where we
1456 	   may make it without affecting IPv4.
1457 	   The rest of the code is protocol independent,
1458 	   and I do not like the idea of uglifying IPv4.
1459 
1460 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1461 	   looks not very well thought out. For now we latch
1462 	   the options received in the last packet enqueued
1463 	   by tcp. Feel free to propose a better solution.
1464 					       --ANK (980728)
1465 	 */
1466 	if (np->rxopt.all)
1467 		opt_skb = skb_clone_and_charge_r(skb, sk);
1468 
1469 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
1470 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1471 		struct dst_entry *dst;
1472 
1473 		dst = rcu_dereference_protected(sk->sk_rx_dst,
1474 						lockdep_sock_is_held(sk));
1475 
1476 		sock_rps_save_rxhash(sk, skb);
1477 		sk_mark_napi_id(sk, skb);
1478 		if (dst) {
1479 			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1480 			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1481 					    dst, sk->sk_rx_dst_cookie) == NULL) {
1482 				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1483 				dst_release(dst);
1484 			}
1485 		}
1486 
1487 		tcp_rcv_established(sk, skb);
1488 		if (opt_skb)
1489 			goto ipv6_pktoptions;
1490 		return 0;
1491 	}
1492 
1493 	if (tcp_checksum_complete(skb))
1494 		goto csum_err;
1495 
1496 	if (sk->sk_state == TCP_LISTEN) {
1497 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1498 
1499 		if (!nsk)
1500 			goto discard;
1501 
1502 		if (nsk != sk) {
1503 			if (tcp_child_process(sk, nsk, skb))
1504 				goto reset;
1505 			if (opt_skb)
1506 				__kfree_skb(opt_skb);
1507 			return 0;
1508 		}
1509 	} else
1510 		sock_rps_save_rxhash(sk, skb);
1511 
1512 	if (tcp_rcv_state_process(sk, skb))
1513 		goto reset;
1514 	if (opt_skb)
1515 		goto ipv6_pktoptions;
1516 	return 0;
1517 
1518 reset:
1519 	tcp_v6_send_reset(sk, skb);
1520 discard:
1521 	if (opt_skb)
1522 		__kfree_skb(opt_skb);
1523 	kfree_skb_reason(skb, reason);
1524 	return 0;
1525 csum_err:
1526 	reason = SKB_DROP_REASON_TCP_CSUM;
1527 	trace_tcp_bad_csum(skb);
1528 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1529 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1530 	goto discard;
1531 
1532 
1533 ipv6_pktoptions:
1534 	/* You may ask, what is this?
1535 
1536 	   1. skb was enqueued by tcp.
1537 	   2. skb is added to the tail of the read queue, rather than out of order.
1538 	   3. The socket is not in a passive state.
1539 	   4. Finally, it really contains options which the user wants to receive.
1540 	 */
1541 	tp = tcp_sk(sk);
1542 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1543 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1544 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1545 			np->mcast_oif = tcp_v6_iif(opt_skb);
1546 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1547 			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1548 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1549 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1550 		if (np->repflow)
1551 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1552 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1553 			tcp_v6_restore_cb(opt_skb);
1554 			opt_skb = xchg(&np->pktoptions, opt_skb);
1555 		} else {
1556 			__kfree_skb(opt_skb);
1557 			opt_skb = xchg(&np->pktoptions, NULL);
1558 		}
1559 	}
1560 
1561 	consume_skb(opt_skb);
1562 	return 0;
1563 }
1564 
1565 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1566 			   const struct tcphdr *th)
1567 {
1568 	/* This is tricky: we move IP6CB to its correct location inside
1569 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1570 	 * _decode_session6() uses IP6CB().
1571 	 * barrier() makes sure compiler won't play aliasing games.
1572 	 */
1573 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1574 		sizeof(struct inet6_skb_parm));
1575 	barrier();
1576 
1577 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1578 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1579 				    skb->len - th->doff*4);
1580 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1581 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1582 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1583 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1584 	TCP_SKB_CB(skb)->sacked = 0;
1585 	TCP_SKB_CB(skb)->has_rxtstamp =
1586 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1587 }
1588 
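/* Main IPv6 TCP receive routine: validate the segment, look up the owning
 * socket and dispatch the skb to the appropriate state handler.
 */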
1589 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1590 {
1591 	enum skb_drop_reason drop_reason;
1592 	int sdif = inet6_sdif(skb);
1593 	int dif = inet6_iif(skb);
1594 	const struct tcphdr *th;
1595 	const struct ipv6hdr *hdr;
1596 	bool refcounted;
1597 	struct sock *sk;
1598 	int ret;
1599 	struct net *net = dev_net(skb->dev);
1600 
1601 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1602 	if (skb->pkt_type != PACKET_HOST)
1603 		goto discard_it;
1604 
1605 	/*
1606 	 *	Count it even if it's bad.
1607 	 */
1608 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1609 
1610 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1611 		goto discard_it;
1612 
1613 	th = (const struct tcphdr *)skb->data;
1614 
1615 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1616 		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1617 		goto bad_packet;
1618 	}
1619 	if (!pskb_may_pull(skb, th->doff*4))
1620 		goto discard_it;
1621 
1622 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1623 		goto csum_error;
1624 
1625 	th = (const struct tcphdr *)skb->data;
1626 	hdr = ipv6_hdr(skb);
1627 
1628 lookup:
1629 	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1630 				th->source, th->dest, inet6_iif(skb), sdif,
1631 				&refcounted);
1632 	if (!sk)
1633 		goto no_tcp_socket;
1634 
1635 process:
1636 	if (sk->sk_state == TCP_TIME_WAIT)
1637 		goto do_time_wait;
1638 
1639 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1640 		struct request_sock *req = inet_reqsk(sk);
1641 		bool req_stolen = false;
1642 		struct sock *nsk;
1643 
1644 		sk = req->rsk_listener;
1645 		drop_reason = tcp_inbound_md5_hash(sk, skb,
1646 						   &hdr->saddr, &hdr->daddr,
1647 						   AF_INET6, dif, sdif);
1648 		if (drop_reason) {
1649 			sk_drops_add(sk, skb);
1650 			reqsk_put(req);
1651 			goto discard_it;
1652 		}
1653 		if (tcp_checksum_complete(skb)) {
1654 			reqsk_put(req);
1655 			goto csum_error;
1656 		}
1657 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1658 			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1659 			if (!nsk) {
1660 				inet_csk_reqsk_queue_drop_and_put(sk, req);
1661 				goto lookup;
1662 			}
1663 			sk = nsk;
1664 			/* reuseport_migrate_sock() has already held one sk_refcnt
1665 			 * before returning.
1666 			 */
1667 		} else {
1668 			sock_hold(sk);
1669 		}
1670 		refcounted = true;
1671 		nsk = NULL;
1672 		if (!tcp_filter(sk, skb)) {
1673 			th = (const struct tcphdr *)skb->data;
1674 			hdr = ipv6_hdr(skb);
1675 			tcp_v6_fill_cb(skb, hdr, th);
1676 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1677 		} else {
1678 			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1679 		}
1680 		if (!nsk) {
1681 			reqsk_put(req);
1682 			if (req_stolen) {
1683 				/* Another cpu got exclusive access to req
1684 				 * and created a full blown socket.
1685 				 * Try to feed this packet to this socket
1686 				 * instead of discarding it.
1687 				 */
1688 				tcp_v6_restore_cb(skb);
1689 				sock_put(sk);
1690 				goto lookup;
1691 			}
1692 			goto discard_and_relse;
1693 		}
1694 		if (nsk == sk) {
1695 			reqsk_put(req);
1696 			tcp_v6_restore_cb(skb);
1697 		} else if (tcp_child_process(sk, nsk, skb)) {
1698 			tcp_v6_send_reset(nsk, skb);
1699 			goto discard_and_relse;
1700 		} else {
1701 			sock_put(sk);
1702 			return 0;
1703 		}
1704 	}
1705 
1706 	if (static_branch_unlikely(&ip6_min_hopcount)) {
1707 		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1708 		if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
1709 			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1710 			drop_reason = SKB_DROP_REASON_TCP_MINTTL;
1711 			goto discard_and_relse;
1712 		}
1713 	}
1714 
1715 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1716 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1717 		goto discard_and_relse;
1718 	}
1719 
1720 	drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr,
1721 					   AF_INET6, dif, sdif);
1722 	if (drop_reason)
1723 		goto discard_and_relse;
1724 
1725 	if (tcp_filter(sk, skb)) {
1726 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1727 		goto discard_and_relse;
1728 	}
1729 	th = (const struct tcphdr *)skb->data;
1730 	hdr = ipv6_hdr(skb);
1731 	tcp_v6_fill_cb(skb, hdr, th);
1732 
1733 	skb->dev = NULL;
1734 
1735 	if (sk->sk_state == TCP_LISTEN) {
1736 		ret = tcp_v6_do_rcv(sk, skb);
1737 		goto put_and_return;
1738 	}
1739 
1740 	sk_incoming_cpu_update(sk);
1741 
1742 	bh_lock_sock_nested(sk);
1743 	tcp_segs_in(tcp_sk(sk), skb);
1744 	ret = 0;
1745 	if (!sock_owned_by_user(sk)) {
1746 		ret = tcp_v6_do_rcv(sk, skb);
1747 	} else {
1748 		if (tcp_add_backlog(sk, skb, &drop_reason))
1749 			goto discard_and_relse;
1750 	}
1751 	bh_unlock_sock(sk);
1752 put_and_return:
1753 	if (refcounted)
1754 		sock_put(sk);
1755 	return ret ? -1 : 0;
1756 
1757 no_tcp_socket:
1758 	drop_reason = SKB_DROP_REASON_NO_SOCKET;
1759 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1760 		goto discard_it;
1761 
1762 	tcp_v6_fill_cb(skb, hdr, th);
1763 
1764 	if (tcp_checksum_complete(skb)) {
1765 csum_error:
1766 		drop_reason = SKB_DROP_REASON_TCP_CSUM;
1767 		trace_tcp_bad_csum(skb);
1768 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1769 bad_packet:
1770 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1771 	} else {
1772 		tcp_v6_send_reset(NULL, skb);
1773 	}
1774 
1775 discard_it:
1776 	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1777 	kfree_skb_reason(skb, drop_reason);
1778 	return 0;
1779 
1780 discard_and_relse:
1781 	sk_drops_add(sk, skb);
1782 	if (refcounted)
1783 		sock_put(sk);
1784 	goto discard_it;
1785 
1786 do_time_wait:
1787 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1788 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1789 		inet_twsk_put(inet_twsk(sk));
1790 		goto discard_it;
1791 	}
1792 
1793 	tcp_v6_fill_cb(skb, hdr, th);
1794 
1795 	if (tcp_checksum_complete(skb)) {
1796 		inet_twsk_put(inet_twsk(sk));
1797 		goto csum_error;
1798 	}
1799 
1800 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1801 	case TCP_TW_SYN:
1802 	{
1803 		struct sock *sk2;
1804 
1805 		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1806 					    skb, __tcp_hdrlen(th),
1807 					    &ipv6_hdr(skb)->saddr, th->source,
1808 					    &ipv6_hdr(skb)->daddr,
1809 					    ntohs(th->dest),
1810 					    tcp_v6_iif_l3_slave(skb),
1811 					    sdif);
1812 		if (sk2) {
1813 			struct inet_timewait_sock *tw = inet_twsk(sk);
1814 			inet_twsk_deschedule_put(tw);
1815 			sk = sk2;
1816 			tcp_v6_restore_cb(skb);
1817 			refcounted = false;
1818 			goto process;
1819 		}
1820 	}
1821 		/* to ACK */
1822 		fallthrough;
1823 	case TCP_TW_ACK:
1824 		tcp_v6_timewait_ack(sk, skb);
1825 		break;
1826 	case TCP_TW_RST:
1827 		tcp_v6_send_reset(sk, skb);
1828 		inet_twsk_deschedule_put(inet_twsk(sk));
1829 		goto discard_it;
1830 	case TCP_TW_SUCCESS:
1831 		;
1832 	}
1833 	goto discard_it;
1834 }
1835 
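/* Early demux: look up an established socket before routing, so that the
 * dst cached in the socket can be attached to the skb.
 */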
1836 void tcp_v6_early_demux(struct sk_buff *skb)
1837 {
1838 	struct net *net = dev_net(skb->dev);
1839 	const struct ipv6hdr *hdr;
1840 	const struct tcphdr *th;
1841 	struct sock *sk;
1842 
1843 	if (skb->pkt_type != PACKET_HOST)
1844 		return;
1845 
1846 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1847 		return;
1848 
1849 	hdr = ipv6_hdr(skb);
1850 	th = tcp_hdr(skb);
1851 
1852 	if (th->doff < sizeof(struct tcphdr) / 4)
1853 		return;
1854 
1855 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
1856 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1857 					&hdr->saddr, th->source,
1858 					&hdr->daddr, ntohs(th->dest),
1859 					inet6_iif(skb), inet6_sdif(skb));
1860 	if (sk) {
1861 		skb->sk = sk;
1862 		skb->destructor = sock_edemux;
1863 		if (sk_fullsock(sk)) {
1864 			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1865 
1866 			if (dst)
1867 				dst = dst_check(dst, sk->sk_rx_dst_cookie);
1868 			if (dst &&
1869 			    sk->sk_rx_dst_ifindex == skb->skb_iif)
1870 				skb_dst_set_noref(skb, dst);
1871 		}
1872 	}
1873 }
1874 
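/* TIME-WAIT minisocket parameters for TCPv6; the uniqueness check and
 * destructor are shared with TCPv4.
 */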
1875 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1876 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1877 	.twsk_unique	= tcp_twsk_unique,
1878 	.twsk_destructor = tcp_twsk_destructor,
1879 };
1880 
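/* Fill in the TCP checksum of an outgoing segment, using the socket's
 * IPv6 source and destination addresses for the pseudo-header.
 */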
1881 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1882 {
1883 	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
1884 }
1885 
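/*
 *	TCP over IPv6: address-family operations used for native IPv6 peers.
 *	The ipv6_mapped variant below is used instead when the peer is an
 *	IPv4-mapped address.
 */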
1886 const struct inet_connection_sock_af_ops ipv6_specific = {
1887 	.queue_xmit	   = inet6_csk_xmit,
1888 	.send_check	   = tcp_v6_send_check,
1889 	.rebuild_header	   = inet6_sk_rebuild_header,
1890 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
1891 	.conn_request	   = tcp_v6_conn_request,
1892 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1893 	.net_header_len	   = sizeof(struct ipv6hdr),
1894 	.net_frag_header_len = sizeof(struct frag_hdr),
1895 	.setsockopt	   = ipv6_setsockopt,
1896 	.getsockopt	   = ipv6_getsockopt,
1897 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1898 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1899 	.mtu_reduced	   = tcp_v6_mtu_reduced,
1900 };
1901 
1902 #ifdef CONFIG_TCP_MD5SIG
1903 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1904 	.md5_lookup	=	tcp_v6_md5_lookup,
1905 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1906 	.md5_parse	=	tcp_v6_parse_md5_keys,
1907 };
1908 #endif
1909 
1910 /*
1911  *	TCP over IPv4 via INET6 API
1912  */
1913 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1914 	.queue_xmit	   = ip_queue_xmit,
1915 	.send_check	   = tcp_v4_send_check,
1916 	.rebuild_header	   = inet_sk_rebuild_header,
1917 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1918 	.conn_request	   = tcp_v6_conn_request,
1919 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1920 	.net_header_len	   = sizeof(struct iphdr),
1921 	.setsockopt	   = ipv6_setsockopt,
1922 	.getsockopt	   = ipv6_getsockopt,
1923 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1924 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1925 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1926 };
1927 
1928 #ifdef CONFIG_TCP_MD5SIG
1929 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1930 	.md5_lookup	=	tcp_v4_md5_lookup,
1931 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1932 	.md5_parse	=	tcp_v6_parse_md5_keys,
1933 };
1934 #endif
1935 
1936 /* NOTE: A lot of things are set to zero explicitly by the call to
1937  *       sk_alloc(), so they need not be done here.
1938  */
1939 static int tcp_v6_init_sock(struct sock *sk)
1940 {
1941 	struct inet_connection_sock *icsk = inet_csk(sk);
1942 
1943 	tcp_init_sock(sk);
1944 
1945 	icsk->icsk_af_ops = &ipv6_specific;
1946 
1947 #ifdef CONFIG_TCP_MD5SIG
1948 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1949 #endif
1950 
1951 	return 0;
1952 }
1953 
1954 #ifdef CONFIG_PROC_FS
1955 /* Proc filesystem TCPv6 sock list dumping. */
1956 static void get_openreq6(struct seq_file *seq,
1957 			 const struct request_sock *req, int i)
1958 {
1959 	long ttd = req->rsk_timer.expires - jiffies;
1960 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1961 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1962 
1963 	if (ttd < 0)
1964 		ttd = 0;
1965 
1966 	seq_printf(seq,
1967 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1968 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1969 		   i,
1970 		   src->s6_addr32[0], src->s6_addr32[1],
1971 		   src->s6_addr32[2], src->s6_addr32[3],
1972 		   inet_rsk(req)->ir_num,
1973 		   dest->s6_addr32[0], dest->s6_addr32[1],
1974 		   dest->s6_addr32[2], dest->s6_addr32[3],
1975 		   ntohs(inet_rsk(req)->ir_rmt_port),
1976 		   TCP_SYN_RECV,
1977 		   0, 0, /* could print option size, but that is af dependent. */
1978 		   1,   /* timers active (only the expire timer) */
1979 		   jiffies_to_clock_t(ttd),
1980 		   req->num_timeout,
1981 		   from_kuid_munged(seq_user_ns(seq),
1982 				    sock_i_uid(req->rsk_listener)),
1983 		   0,  /* non-standard timer */
1984 		   0, /* open_requests have no inode */
1985 		   0, req);
1986 }
1987 
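/* Dump one full TCPv6 socket in /proc/net/tcp6 format: hex addresses and
 * ports, state, tx/rx queue sizes, pending timer, retransmit and probe
 * counters, uid, inode, and a few TCP internals (rto, ato, cwnd, and
 * ssthresh, or the fastopen queue limit for listeners).
 */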
1988 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1989 {
1990 	const struct in6_addr *dest, *src;
1991 	__u16 destp, srcp;
1992 	int timer_active;
1993 	unsigned long timer_expires;
1994 	const struct inet_sock *inet = inet_sk(sp);
1995 	const struct tcp_sock *tp = tcp_sk(sp);
1996 	const struct inet_connection_sock *icsk = inet_csk(sp);
1997 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1998 	int rx_queue;
1999 	int state;
2000 
2001 	dest  = &sp->sk_v6_daddr;
2002 	src   = &sp->sk_v6_rcv_saddr;
2003 	destp = ntohs(inet->inet_dport);
2004 	srcp  = ntohs(inet->inet_sport);
2005 
2006 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2007 	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2008 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2009 		timer_active	= 1;
2010 		timer_expires	= icsk->icsk_timeout;
2011 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2012 		timer_active	= 4;
2013 		timer_expires	= icsk->icsk_timeout;
2014 	} else if (timer_pending(&sp->sk_timer)) {
2015 		timer_active	= 2;
2016 		timer_expires	= sp->sk_timer.expires;
2017 	} else {
2018 		timer_active	= 0;
2019 		timer_expires = jiffies;
2020 	}
2021 
2022 	state = inet_sk_state_load(sp);
2023 	if (state == TCP_LISTEN)
2024 		rx_queue = READ_ONCE(sp->sk_ack_backlog);
2025 	else
2026 		/* Because we don't lock the socket,
2027 		 * we might find a transient negative value.
2028 		 */
2029 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2030 				      READ_ONCE(tp->copied_seq), 0);
2031 
2032 	seq_printf(seq,
2033 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2034 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2035 		   i,
2036 		   src->s6_addr32[0], src->s6_addr32[1],
2037 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2038 		   dest->s6_addr32[0], dest->s6_addr32[1],
2039 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2040 		   state,
2041 		   READ_ONCE(tp->write_seq) - tp->snd_una,
2042 		   rx_queue,
2043 		   timer_active,
2044 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
2045 		   icsk->icsk_retransmits,
2046 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2047 		   icsk->icsk_probes_out,
2048 		   sock_i_ino(sp),
2049 		   refcount_read(&sp->sk_refcnt), sp,
2050 		   jiffies_to_clock_t(icsk->icsk_rto),
2051 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2052 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2053 		   tcp_snd_cwnd(tp),
2054 		   state == TCP_LISTEN ?
2055 			fastopenq->max_qlen :
2056 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2057 		   );
2058 }
2059 
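/* Dump one TIME-WAIT entry: only the addresses, ports, substate and the
 * remaining timer are meaningful; the uid and inode columns are zero.
 */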
2060 static void get_timewait6_sock(struct seq_file *seq,
2061 			       struct inet_timewait_sock *tw, int i)
2062 {
2063 	long delta = tw->tw_timer.expires - jiffies;
2064 	const struct in6_addr *dest, *src;
2065 	__u16 destp, srcp;
2066 
2067 	dest = &tw->tw_v6_daddr;
2068 	src  = &tw->tw_v6_rcv_saddr;
2069 	destp = ntohs(tw->tw_dport);
2070 	srcp  = ntohs(tw->tw_sport);
2071 
2072 	seq_printf(seq,
2073 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2074 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2075 		   i,
2076 		   src->s6_addr32[0], src->s6_addr32[1],
2077 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2078 		   dest->s6_addr32[0], dest->s6_addr32[1],
2079 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2080 		   tw->tw_substate, 0, 0,
2081 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2082 		   refcount_read(&tw->tw_refcnt), tw);
2083 }
2084 
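/* seq_file ->show() callback: dispatch on the socket state, since the
 * iterator mixes full sockets, request sockets (TCP_NEW_SYN_RECV) and
 * TIME-WAIT entries in the same walk.
 */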
2085 static int tcp6_seq_show(struct seq_file *seq, void *v)
2086 {
2087 	struct tcp_iter_state *st;
2088 	struct sock *sk = v;
2089 
2090 	if (v == SEQ_START_TOKEN) {
2091 		seq_puts(seq,
2092 			 "  sl  "
2093 			 "local_address                         "
2094 			 "remote_address                        "
2095 			 "st tx_queue rx_queue tr tm->when retrnsmt"
2096 			 "   uid  timeout inode\n");
2097 		goto out;
2098 	}
2099 	st = seq->private;
2100 
2101 	if (sk->sk_state == TCP_TIME_WAIT)
2102 		get_timewait6_sock(seq, v, st->num);
2103 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2104 		get_openreq6(seq, v, st->num);
2105 	else
2106 		get_tcp6_sock(seq, v, st->num);
2107 out:
2108 	return 0;
2109 }
2110 
2111 static const struct seq_operations tcp6_seq_ops = {
2112 	.show		= tcp6_seq_show,
2113 	.start		= tcp_seq_start,
2114 	.next		= tcp_seq_next,
2115 	.stop		= tcp_seq_stop,
2116 };
2117 
2118 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2119 	.family		= AF_INET6,
2120 };
2121 
2122 int __net_init tcp6_proc_init(struct net *net)
2123 {
2124 	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2125 			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2126 		return -ENOMEM;
2127 	return 0;
2128 }
2129 
2130 void tcp6_proc_exit(struct net *net)
2131 {
2132 	remove_proc_entry("tcp6", net->proc_net);
2133 }
2134 #endif
2135 
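/* Protocol descriptor tying the generic TCP implementation to AF_INET6
 * sockets.  Most operations are the family-independent tcp_*() helpers;
 * only a handful (pre_connect/connect, init, backlog_rcv, hash) have
 * IPv6-specific versions.
 */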
2136 struct proto tcpv6_prot = {
2137 	.name			= "TCPv6",
2138 	.owner			= THIS_MODULE,
2139 	.close			= tcp_close,
2140 	.pre_connect		= tcp_v6_pre_connect,
2141 	.connect		= tcp_v6_connect,
2142 	.disconnect		= tcp_disconnect,
2143 	.accept			= inet_csk_accept,
2144 	.ioctl			= tcp_ioctl,
2145 	.init			= tcp_v6_init_sock,
2146 	.destroy		= tcp_v4_destroy_sock,
2147 	.shutdown		= tcp_shutdown,
2148 	.setsockopt		= tcp_setsockopt,
2149 	.getsockopt		= tcp_getsockopt,
2150 	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
2151 	.keepalive		= tcp_set_keepalive,
2152 	.recvmsg		= tcp_recvmsg,
2153 	.sendmsg		= tcp_sendmsg,
2154 	.sendpage		= tcp_sendpage,
2155 	.backlog_rcv		= tcp_v6_do_rcv,
2156 	.release_cb		= tcp_release_cb,
2157 	.hash			= inet6_hash,
2158 	.unhash			= inet_unhash,
2159 	.get_port		= inet_csk_get_port,
2160 	.put_port		= inet_put_port,
2161 #ifdef CONFIG_BPF_SYSCALL
2162 	.psock_update_sk_prot	= tcp_bpf_update_proto,
2163 #endif
2164 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2165 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2166 	.stream_memory_free	= tcp_stream_memory_free,
2167 	.sockets_allocated	= &tcp_sockets_allocated,
2168 
2169 	.memory_allocated	= &tcp_memory_allocated,
2170 	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
2171 
2172 	.memory_pressure	= &tcp_memory_pressure,
2173 	.orphan_count		= &tcp_orphan_count,
2174 	.sysctl_mem		= sysctl_tcp_mem,
2175 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2176 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2177 	.max_header		= MAX_TCP_HEADER,
2178 	.obj_size		= sizeof(struct tcp6_sock),
2179 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2180 	.twsk_prot		= &tcp6_timewait_sock_ops,
2181 	.rsk_prot		= &tcp6_request_sock_ops,
2182 	.h.hashinfo		= NULL,
2183 	.no_autobind		= true,
2184 	.diag_destroy		= tcp_abort,
2185 };
2186 EXPORT_SYMBOL_GPL(tcpv6_prot);
2187 
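/* Hook TCP into the IPv6 stack: tcpv6_protocol feeds incoming segments to
 * tcp_v6_rcv(), and tcpv6_protosw makes SOCK_STREAM/IPPROTO_TCP sockets
 * available through the inet6 socket layer.
 */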
2188 static const struct inet6_protocol tcpv6_protocol = {
2189 	.handler	=	tcp_v6_rcv,
2190 	.err_handler	=	tcp_v6_err,
2191 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2192 };
2193 
2194 static struct inet_protosw tcpv6_protosw = {
2195 	.type		=	SOCK_STREAM,
2196 	.protocol	=	IPPROTO_TCP,
2197 	.prot		=	&tcpv6_prot,
2198 	.ops		=	&inet6_stream_ops,
2199 	.flags		=	INET_PROTOSW_PERMANENT |
2200 				INET_PROTOSW_ICSK,
2201 };
2202 
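/* Each network namespace gets a control socket, used by the stack to send
 * replies (such as resets) that are not tied to a full socket.
 */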
2203 static int __net_init tcpv6_net_init(struct net *net)
2204 {
2205 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2206 				    SOCK_RAW, IPPROTO_TCP, net);
2207 }
2208 
2209 static void __net_exit tcpv6_net_exit(struct net *net)
2210 {
2211 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2212 }
2213 
2214 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2215 {
2216 	tcp_twsk_purge(net_exit_list, AF_INET6);
2217 }
2218 
2219 static struct pernet_operations tcpv6_net_ops = {
2220 	.init	    = tcpv6_net_init,
2221 	.exit	    = tcpv6_net_exit,
2222 	.exit_batch = tcpv6_net_exit_batch,
2223 };
2224 
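/* Module init: register the protocol handler, the protosw entry, the
 * per-netns operations and the MPTCPv6 bits, unwinding in reverse order
 * if any step fails.
 */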
2225 int __init tcpv6_init(void)
2226 {
2227 	int ret;
2228 
2229 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2230 	if (ret)
2231 		goto out;
2232 
2233 	/* register inet6 protocol */
2234 	ret = inet6_register_protosw(&tcpv6_protosw);
2235 	if (ret)
2236 		goto out_tcpv6_protocol;
2237 
2238 	ret = register_pernet_subsys(&tcpv6_net_ops);
2239 	if (ret)
2240 		goto out_tcpv6_protosw;
2241 
2242 	ret = mptcpv6_init();
2243 	if (ret)
2244 		goto out_tcpv6_pernet_subsys;
2245 
2246 out:
2247 	return ret;
2248 
2249 out_tcpv6_pernet_subsys:
2250 	unregister_pernet_subsys(&tcpv6_net_ops);
2251 out_tcpv6_protosw:
2252 	inet6_unregister_protosw(&tcpv6_protosw);
2253 out_tcpv6_protocol:
2254 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2255 	goto out;
2256 }
2257 
2258 void tcpv6_exit(void)
2259 {
2260 	unregister_pernet_subsys(&tcpv6_net_ops);
2261 	inet6_unregister_protosw(&tcpv6_protosw);
2262 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2263 }
2264