xref: /linux/net/ipv6/tcp_ipv6.c (revision bdfa82f5b8998a6311a8ef0cf89ad413f5cd9ea4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	TCP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on:
10  *	linux/net/ipv4/tcp.c
11  *	linux/net/ipv4/tcp_input.c
12  *	linux/net/ipv4/tcp_output.c
13  *
14  *	Fixes:
15  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
16  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
17  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
18  *					a single port at the same time.
19  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
20  */
21 
22 #include <linux/bottom_half.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/jiffies.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/init.h>
34 #include <linux/jhash.h>
35 #include <linux/ipsec.h>
36 #include <linux/times.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39 #include <linux/ipv6.h>
40 #include <linux/icmpv6.h>
41 #include <linux/random.h>
42 #include <linux/indirect_call_wrapper.h>
43 
44 #include <net/tcp.h>
45 #include <net/ndisc.h>
46 #include <net/inet6_hashtables.h>
47 #include <net/inet6_connection_sock.h>
48 #include <net/ipv6.h>
49 #include <net/transp_v6.h>
50 #include <net/addrconf.h>
51 #include <net/ip6_route.h>
52 #include <net/ip6_checksum.h>
53 #include <net/inet_ecn.h>
54 #include <net/protocol.h>
55 #include <net/xfrm.h>
56 #include <net/snmp.h>
57 #include <net/dsfield.h>
58 #include <net/timewait_sock.h>
59 #include <net/inet_common.h>
60 #include <net/secure_seq.h>
61 #include <net/hotdata.h>
62 #include <net/busy_poll.h>
63 #include <net/rstreason.h>
64 
65 #include <linux/proc_fs.h>
66 #include <linux/seq_file.h>
67 
68 #include <crypto/hash.h>
69 #include <linux/scatterlist.h>
70 
71 #include <trace/events/tcp.h>
72 
73 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
74 			      enum sk_rst_reason reason);
75 static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
76 				      struct request_sock *req);
77 
78 INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 const struct inet_connection_sock_af_ops ipv6_specific;
82 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 #endif
86 
87 /* Helper returning the inet6 address from a given tcp socket.
88  * It can be used in the TCP stack instead of inet6_sk(sk).
89  * This avoids a dereference and allows compiler optimizations.
90  * It is a specialized version of inet6_sk_generic().
91  */
92 #define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \
93 					      struct tcp6_sock, tcp)->inet6)
94 
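/* Cache the route of an incoming skb on the socket so the established
 * fast path can reuse it instead of doing a full route lookup; the
 * saved ifindex and rt6 cookie let us detect a stale cached route later.
 */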
95 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
96 {
97 	struct dst_entry *dst = skb_dst(skb);
98 
99 	if (dst && dst_hold_safe(dst)) {
100 		rcu_assign_pointer(sk->sk_rx_dst, dst);
101 		sk->sk_rx_dst_ifindex = skb->skb_iif;
102 		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
103 	}
104 }
105 
106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
107 {
108 	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 				ipv6_hdr(skb)->saddr.s6_addr32,
110 				tcp_hdr(skb)->dest,
111 				tcp_hdr(skb)->source);
112 }
113 
114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
115 {
116 	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117 				   ipv6_hdr(skb)->saddr.s6_addr32);
118 }
119 
120 static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
121 			      int addr_len)
122 {
123 	/* This check is replicated from tcp_v6_connect() and intended to
124 	 * prevent the BPF program called below from accessing bytes that are
125 	 * out of the bounds specified by the user in addr_len.
126 	 */
127 	if (addr_len < SIN6_LEN_RFC2133)
128 		return -EINVAL;
129 
130 	sock_owned_by_me(sk);
131 
132 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
133 }
134 
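/* Active open: validate the destination address, resolve a route, pick the
 * source address/port and send the SYN. A v4-mapped destination is handed
 * over to tcp_v4_connect() with the af_ops switched to the mapped variants.
 */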
135 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
136 			  int addr_len)
137 {
138 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
139 	struct inet_connection_sock *icsk = inet_csk(sk);
140 	struct in6_addr *saddr = NULL, *final_p, final;
141 	struct inet_timewait_death_row *tcp_death_row;
142 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
143 	struct inet_sock *inet = inet_sk(sk);
144 	struct tcp_sock *tp = tcp_sk(sk);
145 	struct net *net = sock_net(sk);
146 	struct ipv6_txoptions *opt;
147 	struct dst_entry *dst;
148 	struct flowi6 fl6;
149 	int addr_type;
150 	int err;
151 
152 	if (addr_len < SIN6_LEN_RFC2133)
153 		return -EINVAL;
154 
155 	if (usin->sin6_family != AF_INET6)
156 		return -EAFNOSUPPORT;
157 
158 	memset(&fl6, 0, sizeof(fl6));
159 
160 	if (inet6_test_bit(SNDFLOW, sk)) {
161 		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
162 		IP6_ECN_flow_init(fl6.flowlabel);
163 		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
164 			struct ip6_flowlabel *flowlabel;
165 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
166 			if (IS_ERR(flowlabel))
167 				return -EINVAL;
168 			fl6_sock_release(flowlabel);
169 		}
170 	}
171 
172 	/*
173 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
174 	 */
175 
176 	if (ipv6_addr_any(&usin->sin6_addr)) {
177 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
178 			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
179 					       &usin->sin6_addr);
180 		else
181 			usin->sin6_addr = in6addr_loopback;
182 	}
183 
184 	addr_type = ipv6_addr_type(&usin->sin6_addr);
185 
186 	if (addr_type & IPV6_ADDR_MULTICAST)
187 		return -ENETUNREACH;
188 
189 	if (addr_type&IPV6_ADDR_LINKLOCAL) {
190 		if (addr_len >= sizeof(struct sockaddr_in6) &&
191 		    usin->sin6_scope_id) {
192 			/* If interface is set while binding, indices
193 			 * must coincide.
194 			 */
195 			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
196 				return -EINVAL;
197 
198 			sk->sk_bound_dev_if = usin->sin6_scope_id;
199 		}
200 
201 		/* Connecting to a link-local address requires an interface */
202 		if (!sk->sk_bound_dev_if)
203 			return -EINVAL;
204 	}
205 
206 	if (tp->rx_opt.ts_recent_stamp &&
207 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
208 		tp->rx_opt.ts_recent = 0;
209 		tp->rx_opt.ts_recent_stamp = 0;
210 		WRITE_ONCE(tp->write_seq, 0);
211 	}
212 
213 	sk->sk_v6_daddr = usin->sin6_addr;
214 	np->flow_label = fl6.flowlabel;
215 
216 	/*
217 	 *	TCP over IPv4
218 	 */
219 
220 	if (addr_type & IPV6_ADDR_MAPPED) {
221 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
222 		struct sockaddr_in sin;
223 
224 		if (ipv6_only_sock(sk))
225 			return -ENETUNREACH;
226 
227 		sin.sin_family = AF_INET;
228 		sin.sin_port = usin->sin6_port;
229 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
230 
231 		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
232 		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
233 		if (sk_is_mptcp(sk))
234 			mptcpv6_handle_mapped(sk, true);
235 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
236 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
237 		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
238 #endif
239 
240 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
241 
242 		if (err) {
243 			icsk->icsk_ext_hdr_len = exthdrlen;
244 			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
245 			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
246 			if (sk_is_mptcp(sk))
247 				mptcpv6_handle_mapped(sk, false);
248 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
249 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
250 			tp->af_specific = &tcp_sock_ipv6_specific;
251 #endif
252 			goto failure;
253 		}
254 		np->saddr = sk->sk_v6_rcv_saddr;
255 
256 		return err;
257 	}
258 
259 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
260 		saddr = &sk->sk_v6_rcv_saddr;
261 
262 	fl6.flowi6_proto = IPPROTO_TCP;
263 	fl6.daddr = sk->sk_v6_daddr;
264 	fl6.saddr = saddr ? *saddr : np->saddr;
265 	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
266 	fl6.flowi6_oif = sk->sk_bound_dev_if;
267 	fl6.flowi6_mark = sk->sk_mark;
268 	fl6.fl6_dport = usin->sin6_port;
269 	fl6.fl6_sport = inet->inet_sport;
270 	if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !fl6.fl6_sport)
271 		fl6.flowi6_flags = FLOWI_FLAG_ANY_SPORT;
272 	fl6.flowi6_uid = sk->sk_uid;
273 
274 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
275 	final_p = fl6_update_dst(&fl6, opt, &final);
276 
277 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
278 
279 	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
280 	if (IS_ERR(dst)) {
281 		err = PTR_ERR(dst);
282 		goto failure;
283 	}
284 
285 	tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
286 	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
287 
288 	if (!saddr) {
289 		saddr = &fl6.saddr;
290 
291 		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
292 		if (err)
293 			goto failure;
294 	}
295 
296 	/* set the source address */
297 	np->saddr = *saddr;
298 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
299 
300 	sk->sk_gso_type = SKB_GSO_TCPV6;
301 	ip6_dst_store(sk, dst, NULL, NULL);
302 
303 	icsk->icsk_ext_hdr_len = 0;
304 	if (opt)
305 		icsk->icsk_ext_hdr_len = opt->opt_flen +
306 					 opt->opt_nflen;
307 
308 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
309 
310 	inet->inet_dport = usin->sin6_port;
311 
312 	tcp_set_state(sk, TCP_SYN_SENT);
313 	err = inet6_hash_connect(tcp_death_row, sk);
314 	if (err)
315 		goto late_failure;
316 
317 	sk_set_txhash(sk);
318 
319 	if (likely(!tp->repair)) {
320 		if (!tp->write_seq)
321 			WRITE_ONCE(tp->write_seq,
322 				   secure_tcpv6_seq(np->saddr.s6_addr32,
323 						    sk->sk_v6_daddr.s6_addr32,
324 						    inet->inet_sport,
325 						    inet->inet_dport));
326 		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
327 						   sk->sk_v6_daddr.s6_addr32);
328 	}
329 
330 	if (tcp_fastopen_defer_connect(sk, &err))
331 		return err;
332 	if (err)
333 		goto late_failure;
334 
335 	err = tcp_connect(sk);
336 	if (err)
337 		goto late_failure;
338 
339 	return 0;
340 
341 late_failure:
342 	tcp_set_state(sk, TCP_CLOSE);
343 	inet_bhash2_reset_saddr(sk);
344 failure:
345 	inet->inet_dport = 0;
346 	sk->sk_route_caps = 0;
347 	return err;
348 }
349 
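/* Apply a pending PMTU update: called directly from tcp_v6_err() when the
 * socket is not owned by user context, otherwise deferred through the
 * TCP_MTU_REDUCED_DEFERRED flag.
 */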
350 static void tcp_v6_mtu_reduced(struct sock *sk)
351 {
352 	struct dst_entry *dst;
353 	u32 mtu;
354 
355 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
356 		return;
357 
358 	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
359 
360 	/* Drop requests trying to increase our current mss.
361 	 * The check done in __ip6_rt_update_pmtu() is too late.
362 	 */
363 	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
364 		return;
365 
366 	dst = inet6_csk_update_pmtu(sk, mtu);
367 	if (!dst)
368 		return;
369 
370 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
371 		tcp_sync_mss(sk, dst_mtu(dst));
372 		tcp_simple_retransmit(sk);
373 	}
374 }
375 
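/* ICMPv6 error handler: look up the socket the offending segment belongs
 * to and handle redirects, PKT_TOOBIG (path MTU) and hard/soft errors.
 */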
376 static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 		u8 type, u8 code, int offset, __be32 info)
378 {
379 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
380 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
381 	struct net *net = dev_net_rcu(skb->dev);
382 	struct request_sock *fastopen;
383 	struct ipv6_pinfo *np;
384 	struct tcp_sock *tp;
385 	__u32 seq, snd_una;
386 	struct sock *sk;
387 	bool fatal;
388 	int err;
389 
390 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
391 					&hdr->daddr, th->dest,
392 					&hdr->saddr, ntohs(th->source),
393 					skb->dev->ifindex, inet6_sdif(skb));
394 
395 	if (!sk) {
396 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
397 				  ICMP6_MIB_INERRORS);
398 		return -ENOENT;
399 	}
400 
401 	if (sk->sk_state == TCP_TIME_WAIT) {
402 		/* To increase the counter of ignored icmps for TCP-AO */
403 		tcp_ao_ignore_icmp(sk, AF_INET6, type, code);
404 		inet_twsk_put(inet_twsk(sk));
405 		return 0;
406 	}
407 	seq = ntohl(th->seq);
408 	fatal = icmpv6_err_convert(type, code, &err);
409 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
410 		tcp_req_err(sk, seq, fatal);
411 		return 0;
412 	}
413 
414 	if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) {
415 		sock_put(sk);
416 		return 0;
417 	}
418 
419 	bh_lock_sock(sk);
420 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
421 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
422 
423 	if (sk->sk_state == TCP_CLOSE)
424 		goto out;
425 
426 	if (static_branch_unlikely(&ip6_min_hopcount)) {
427 		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
428 		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
429 			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
430 			goto out;
431 		}
432 	}
433 
434 	tp = tcp_sk(sk);
435 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
436 	fastopen = rcu_dereference(tp->fastopen_rsk);
437 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
438 	if (sk->sk_state != TCP_LISTEN &&
439 	    !between(seq, snd_una, tp->snd_nxt)) {
440 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
441 		goto out;
442 	}
443 
444 	np = tcp_inet6_sk(sk);
445 
446 	if (type == NDISC_REDIRECT) {
447 		if (!sock_owned_by_user(sk)) {
448 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
449 
450 			if (dst)
451 				dst->ops->redirect(dst, sk, skb);
452 		}
453 		goto out;
454 	}
455 
456 	if (type == ICMPV6_PKT_TOOBIG) {
457 		u32 mtu = ntohl(info);
458 
459 		/* We are not interested in TCP_LISTEN and open_requests
460 		 * (SYN-ACKs sent out by Linux are always <576 bytes so
461 		 * they should go through unfragmented).
462 		 */
463 		if (sk->sk_state == TCP_LISTEN)
464 			goto out;
465 
466 		if (!ip6_sk_accept_pmtu(sk))
467 			goto out;
468 
469 		if (mtu < IPV6_MIN_MTU)
470 			goto out;
471 
472 		WRITE_ONCE(tp->mtu_info, mtu);
473 
474 		if (!sock_owned_by_user(sk))
475 			tcp_v6_mtu_reduced(sk);
476 		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
477 					   &sk->sk_tsq_flags))
478 			sock_hold(sk);
479 		goto out;
480 	}
481 
482 
483 	/* Might be for a request_sock */
484 	switch (sk->sk_state) {
485 	case TCP_SYN_SENT:
486 	case TCP_SYN_RECV:
487 		/* Only in fast or simultaneous open. If a fast open socket is
488 		 * already accepted it is treated as a connected one below.
489 		 */
490 		if (fastopen && !fastopen->sk)
491 			break;
492 
493 		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
494 
495 		if (!sock_owned_by_user(sk))
496 			tcp_done_with_error(sk, err);
497 		else
498 			WRITE_ONCE(sk->sk_err_soft, err);
499 		goto out;
500 	case TCP_LISTEN:
501 		break;
502 	default:
503 		/* Check if this ICMP message allows reverting the backoff
504 		 * (see RFC 6069)
505 		 */
506 		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
507 		    code == ICMPV6_NOROUTE)
508 			tcp_ld_RTO_revert(sk, seq);
509 	}
510 
511 	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
512 		WRITE_ONCE(sk->sk_err, err);
513 		sk_error_report(sk);
514 	} else {
515 		WRITE_ONCE(sk->sk_err_soft, err);
516 	}
517 out:
518 	bh_unlock_sock(sk);
519 	sock_put(sk);
520 	return 0;
521 }
522 
523 
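/* Send a SYN-ACK for a request sock: route it via the request's flow,
 * optionally reflecting the incoming ToS and flow label, and transmit it
 * with the listener's IPv6 transmit options.
 */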
524 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
525 			      struct flowi *fl,
526 			      struct request_sock *req,
527 			      struct tcp_fastopen_cookie *foc,
528 			      enum tcp_synack_type synack_type,
529 			      struct sk_buff *syn_skb)
530 {
531 	struct inet_request_sock *ireq = inet_rsk(req);
532 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
533 	struct ipv6_txoptions *opt;
534 	struct flowi6 *fl6 = &fl->u.ip6;
535 	struct sk_buff *skb;
536 	int err = -ENOMEM;
537 	u8 tclass;
538 
539 	/* First, grab a route. */
540 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
541 					       IPPROTO_TCP)) == NULL)
542 		goto done;
543 
544 	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
545 
546 	if (skb) {
547 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
548 				    &ireq->ir_v6_rmt_addr);
549 
550 		fl6->daddr = ireq->ir_v6_rmt_addr;
551 		if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
552 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
553 
554 		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
555 				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
556 				(np->tclass & INET_ECN_MASK) :
557 				np->tclass;
558 
559 		if (!INET_ECN_is_capable(tclass) &&
560 		    tcp_bpf_ca_needs_ecn((struct sock *)req))
561 			tclass |= INET_ECN_ECT_0;
562 
563 		rcu_read_lock();
564 		opt = ireq->ipv6_opt;
565 		if (!opt)
566 			opt = rcu_dereference(np->opt);
567 		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
568 			       opt, tclass, READ_ONCE(sk->sk_priority));
569 		rcu_read_unlock();
570 		err = net_xmit_eval(err);
571 	}
572 
573 done:
574 	return err;
575 }
576 
577 
578 static void tcp_v6_reqsk_destructor(struct request_sock *req)
579 {
580 	kfree(inet_rsk(req)->ipv6_opt);
581 	consume_skb(inet_rsk(req)->pktopts);
582 }
583 
584 #ifdef CONFIG_TCP_MD5SIG
585 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
586 						   const struct in6_addr *addr,
587 						   int l3index)
588 {
589 	return tcp_md5_do_lookup(sk, l3index,
590 				 (union tcp_md5_addr *)addr, AF_INET6);
591 }
592 
593 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
594 						const struct sock *addr_sk)
595 {
596 	int l3index;
597 
598 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
599 						 addr_sk->sk_bound_dev_if);
600 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
601 				    l3index);
602 }
603 
604 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
605 				 sockptr_t optval, int optlen)
606 {
607 	struct tcp_md5sig cmd;
608 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
609 	union tcp_ao_addr *addr;
610 	int l3index = 0;
611 	u8 prefixlen;
612 	bool l3flag;
613 	u8 flags;
614 
615 	if (optlen < sizeof(cmd))
616 		return -EINVAL;
617 
618 	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
619 		return -EFAULT;
620 
621 	if (sin6->sin6_family != AF_INET6)
622 		return -EINVAL;
623 
624 	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
625 	l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
626 
627 	if (optname == TCP_MD5SIG_EXT &&
628 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
629 		prefixlen = cmd.tcpm_prefixlen;
630 		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
631 					prefixlen > 32))
632 			return -EINVAL;
633 	} else {
634 		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
635 	}
636 
637 	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
638 	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
639 		struct net_device *dev;
640 
641 		rcu_read_lock();
642 		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
643 		if (dev && netif_is_l3_master(dev))
644 			l3index = dev->ifindex;
645 		rcu_read_unlock();
646 
647 		/* OK to reference the set/not-set result outside of RCU;
648 		 * right now the device MUST be an L3 master
649 		 */
650 		if (!dev || !l3index)
651 			return -EINVAL;
652 	}
653 
654 	if (!cmd.tcpm_keylen) {
655 		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
656 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
657 					      AF_INET, prefixlen,
658 					      l3index, flags);
659 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
660 				      AF_INET6, prefixlen, l3index, flags);
661 	}
662 
663 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
664 		return -EINVAL;
665 
666 	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
667 		addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3];
668 
669 		/* Don't allow keys for peers that have a matching TCP-AO key.
670 		 * See the comment in tcp_ao_add_cmd()
671 		 */
672 		if (tcp_ao_required(sk, addr, AF_INET,
673 				    l3flag ? l3index : -1, false))
674 			return -EKEYREJECTED;
675 		return tcp_md5_do_add(sk, addr,
676 				      AF_INET, prefixlen, l3index, flags,
677 				      cmd.tcpm_key, cmd.tcpm_keylen);
678 	}
679 
680 	addr = (union tcp_md5_addr *)&sin6->sin6_addr;
681 
682 	/* Don't allow keys for peers that have a matching TCP-AO key.
683 	 * See the comment in tcp_ao_add_cmd()
684 	 */
685 	if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false))
686 		return -EKEYREJECTED;
687 
688 	return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags,
689 			      cmd.tcpm_key, cmd.tcpm_keylen);
690 }
691 
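/* Feed the IPv6 pseudo-header plus the TCP header (with its checksum field
 * zeroed) into the MD5 hash, as required for the RFC 2385 signature option.
 */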
692 static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp,
693 				   const struct in6_addr *daddr,
694 				   const struct in6_addr *saddr,
695 				   const struct tcphdr *th, int nbytes)
696 {
697 	struct tcp6_pseudohdr *bp;
698 	struct scatterlist sg;
699 	struct tcphdr *_th;
700 
701 	bp = hp->scratch;
702 	/* 1. TCP pseudo-header (RFC2460) */
703 	bp->saddr = *saddr;
704 	bp->daddr = *daddr;
705 	bp->protocol = cpu_to_be32(IPPROTO_TCP);
706 	bp->len = cpu_to_be32(nbytes);
707 
708 	_th = (struct tcphdr *)(bp + 1);
709 	memcpy(_th, th, sizeof(*th));
710 	_th->check = 0;
711 
712 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
713 	ahash_request_set_crypt(hp->req, &sg, NULL,
714 				sizeof(*bp) + sizeof(*th));
715 	return crypto_ahash_update(hp->req);
716 }
717 
718 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
719 			       const struct in6_addr *daddr, struct in6_addr *saddr,
720 			       const struct tcphdr *th)
721 {
722 	struct tcp_sigpool hp;
723 
724 	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
725 		goto clear_hash_nostart;
726 
727 	if (crypto_ahash_init(hp.req))
728 		goto clear_hash;
729 	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
730 		goto clear_hash;
731 	if (tcp_md5_hash_key(&hp, key))
732 		goto clear_hash;
733 	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
734 	if (crypto_ahash_final(hp.req))
735 		goto clear_hash;
736 
737 	tcp_sigpool_end(&hp);
738 	return 0;
739 
740 clear_hash:
741 	tcp_sigpool_end(&hp);
742 clear_hash_nostart:
743 	memset(md5_hash, 0, 16);
744 	return 1;
745 }
746 
747 static int tcp_v6_md5_hash_skb(char *md5_hash,
748 			       const struct tcp_md5sig_key *key,
749 			       const struct sock *sk,
750 			       const struct sk_buff *skb)
751 {
752 	const struct tcphdr *th = tcp_hdr(skb);
753 	const struct in6_addr *saddr, *daddr;
754 	struct tcp_sigpool hp;
755 
756 	if (sk) { /* valid for establish/request sockets */
757 		saddr = &sk->sk_v6_rcv_saddr;
758 		daddr = &sk->sk_v6_daddr;
759 	} else {
760 		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
761 		saddr = &ip6h->saddr;
762 		daddr = &ip6h->daddr;
763 	}
764 
765 	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
766 		goto clear_hash_nostart;
767 
768 	if (crypto_ahash_init(hp.req))
769 		goto clear_hash;
770 
771 	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
772 		goto clear_hash;
773 	if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
774 		goto clear_hash;
775 	if (tcp_md5_hash_key(&hp, key))
776 		goto clear_hash;
777 	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
778 	if (crypto_ahash_final(hp.req))
779 		goto clear_hash;
780 
781 	tcp_sigpool_end(&hp);
782 	return 0;
783 
784 clear_hash:
785 	tcp_sigpool_end(&hp);
786 clear_hash_nostart:
787 	memset(md5_hash, 0, 16);
788 	return 1;
789 }
790 #endif
791 
792 static void tcp_v6_init_req(struct request_sock *req,
793 			    const struct sock *sk_listener,
794 			    struct sk_buff *skb,
795 			    u32 tw_isn)
796 {
797 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
798 	struct inet_request_sock *ireq = inet_rsk(req);
799 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
800 
801 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
802 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
803 	ireq->ir_rmt_addr = LOOPBACK4_IPV6;
804 	ireq->ir_loc_addr = LOOPBACK4_IPV6;
805 
806 	/* So that link locals have meaning */
807 	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
808 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
809 		ireq->ir_iif = tcp_v6_iif(skb);
810 
811 	if (!tw_isn &&
812 	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
813 	     np->rxopt.bits.rxinfo ||
814 	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
815 	     np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
816 		refcount_inc(&skb->users);
817 		ireq->pktopts = skb;
818 	}
819 }
820 
821 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
822 					  struct sk_buff *skb,
823 					  struct flowi *fl,
824 					  struct request_sock *req,
825 					  u32 tw_isn)
826 {
827 	tcp_v6_init_req(req, sk, skb, tw_isn);
828 
829 	if (security_inet_conn_request(sk, skb, req))
830 		return NULL;
831 
832 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
833 }
834 
835 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
836 	.family		=	AF_INET6,
837 	.obj_size	=	sizeof(struct tcp6_request_sock),
838 	.rtx_syn_ack	=	tcp_rtx_synack,
839 	.send_ack	=	tcp_v6_reqsk_send_ack,
840 	.destructor	=	tcp_v6_reqsk_destructor,
841 	.send_reset	=	tcp_v6_send_reset,
842 	.syn_ack_timeout =	tcp_syn_ack_timeout,
843 };
844 
845 const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
846 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
847 				sizeof(struct ipv6hdr),
848 #ifdef CONFIG_TCP_MD5SIG
849 	.req_md5_lookup	=	tcp_v6_md5_lookup,
850 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
851 #endif
852 #ifdef CONFIG_TCP_AO
853 	.ao_lookup	=	tcp_v6_ao_lookup_rsk,
854 	.ao_calc_key	=	tcp_v6_ao_calc_key_rsk,
855 	.ao_synack_hash =	tcp_v6_ao_synack_hash,
856 #endif
857 #ifdef CONFIG_SYN_COOKIES
858 	.cookie_init_seq =	cookie_v6_init_sequence,
859 #endif
860 	.route_req	=	tcp_v6_route_req,
861 	.init_seq	=	tcp_v6_init_seq,
862 	.init_ts_off	=	tcp_v6_init_ts_off,
863 	.send_synack	=	tcp_v6_send_synack,
864 };
865 
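/* Build and transmit a bare ACK or RST (no payload) in reply to @skb, using
 * the per-netns control socket when no full socket is available. Timestamp,
 * MD5, TCP-AO and MPTCP reset options are appended as requested.
 */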
866 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
867 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
868 				 int oif, int rst, u8 tclass, __be32 label,
869 				 u32 priority, u32 txhash, struct tcp_key *key)
870 {
871 	struct net *net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
872 	unsigned int tot_len = sizeof(struct tcphdr);
873 	struct sock *ctl_sk = net->ipv6.tcp_sk;
874 	const struct tcphdr *th = tcp_hdr(skb);
875 	__be32 mrst = 0, *topt;
876 	struct dst_entry *dst;
877 	struct sk_buff *buff;
878 	struct tcphdr *t1;
879 	struct flowi6 fl6;
880 	u32 mark = 0;
881 
882 	if (tsecr)
883 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
884 	if (tcp_key_is_md5(key))
885 		tot_len += TCPOLEN_MD5SIG_ALIGNED;
886 	if (tcp_key_is_ao(key))
887 		tot_len += tcp_ao_len_aligned(key->ao_key);
888 
889 #ifdef CONFIG_MPTCP
890 	if (rst && !tcp_key_is_md5(key)) {
891 		mrst = mptcp_reset_option(skb);
892 
893 		if (mrst)
894 			tot_len += sizeof(__be32);
895 	}
896 #endif
897 
898 	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
899 	if (!buff)
900 		return;
901 
902 	skb_reserve(buff, MAX_TCP_HEADER);
903 
904 	t1 = skb_push(buff, tot_len);
905 	skb_reset_transport_header(buff);
906 
907 	/* Swap the send and the receive. */
908 	memset(t1, 0, sizeof(*t1));
909 	t1->dest = th->source;
910 	t1->source = th->dest;
911 	t1->doff = tot_len / 4;
912 	t1->seq = htonl(seq);
913 	t1->ack_seq = htonl(ack);
914 	t1->ack = !rst || !th->ack;
915 	t1->rst = rst;
916 	t1->window = htons(win);
917 
918 	topt = (__be32 *)(t1 + 1);
919 
920 	if (tsecr) {
921 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
922 				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
923 		*topt++ = htonl(tsval);
924 		*topt++ = htonl(tsecr);
925 	}
926 
927 	if (mrst)
928 		*topt++ = mrst;
929 
930 #ifdef CONFIG_TCP_MD5SIG
931 	if (tcp_key_is_md5(key)) {
932 		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
933 				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
934 		tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
935 				    &ipv6_hdr(skb)->saddr,
936 				    &ipv6_hdr(skb)->daddr, t1);
937 	}
938 #endif
939 #ifdef CONFIG_TCP_AO
940 	if (tcp_key_is_ao(key)) {
941 		*topt++ = htonl((TCPOPT_AO << 24) |
942 				(tcp_ao_len(key->ao_key) << 16) |
943 				(key->ao_key->sndid << 8) |
944 				(key->rcv_next));
945 
946 		tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
947 				key->traffic_key,
948 				(union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
949 				(union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
950 				t1, key->sne);
951 	}
952 #endif
953 
954 	memset(&fl6, 0, sizeof(fl6));
955 	fl6.daddr = ipv6_hdr(skb)->saddr;
956 	fl6.saddr = ipv6_hdr(skb)->daddr;
957 	fl6.flowlabel = label;
958 
959 	buff->ip_summed = CHECKSUM_PARTIAL;
960 
961 	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
962 
963 	fl6.flowi6_proto = IPPROTO_TCP;
964 	if (rt6_need_strict(&fl6.daddr) && !oif)
965 		fl6.flowi6_oif = tcp_v6_iif(skb);
966 	else {
967 		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
968 			oif = skb->skb_iif;
969 
970 		fl6.flowi6_oif = oif;
971 	}
972 
973 	if (sk) {
974 		/* unconstify the socket only to attach it to buff with care. */
975 		skb_set_owner_edemux(buff, (struct sock *)sk);
976 
977 		if (sk->sk_state == TCP_TIME_WAIT)
978 			mark = inet_twsk(sk)->tw_mark;
979 		else
980 			mark = READ_ONCE(sk->sk_mark);
981 		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
982 	}
983 	if (txhash) {
984 		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
985 		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
986 	}
987 	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
988 	fl6.fl6_dport = t1->dest;
989 	fl6.fl6_sport = t1->source;
990 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
991 	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
992 
993 	/* Pass a socket to ip6_dst_lookup_flow() even when it is for a RST.
994 	 * The underlying function will use this to retrieve the network
995 	 * namespace.
996 	 */
997 	if (sk && sk->sk_state != TCP_TIME_WAIT)
998 		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
999 	else
1000 		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
1001 	if (!IS_ERR(dst)) {
1002 		skb_dst_set(buff, dst);
1003 		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
1004 			 tclass, priority);
1005 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1006 		if (rst)
1007 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
1008 		return;
1009 	}
1010 
1011 	kfree_skb(buff);
1012 }
1013 
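/* Send a RST in reply to @skb. Sequence numbers are derived from the
 * offending segment; MD5/AO keys are looked up (via a listener lookup when
 * no socket is given) so the RST can carry the required signature.
 */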
1014 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
1015 			      enum sk_rst_reason reason)
1016 {
1017 	const struct tcphdr *th = tcp_hdr(skb);
1018 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1019 	const __u8 *md5_hash_location = NULL;
1020 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1021 	bool allocated_traffic_key = false;
1022 #endif
1023 	const struct tcp_ao_hdr *aoh;
1024 	struct tcp_key key = {};
1025 	u32 seq = 0, ack_seq = 0;
1026 	__be32 label = 0;
1027 	u32 priority = 0;
1028 	struct net *net;
1029 	u32 txhash = 0;
1030 	int oif = 0;
1031 #ifdef CONFIG_TCP_MD5SIG
1032 	unsigned char newhash[16];
1033 	int genhash;
1034 	struct sock *sk1 = NULL;
1035 #endif
1036 
1037 	if (th->rst)
1038 		return;
1039 
1040 	/* If sk is not NULL, it means we did a successful lookup and the
1041 	 * incoming route had to be correct. prequeue might have dropped our dst.
1042 	 */
1043 	if (!sk && !ipv6_unicast_destination(skb))
1044 		return;
1045 
1046 	net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
1047 	/* Invalid TCP option size or twice included auth */
1048 	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
1049 		return;
1050 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1051 	rcu_read_lock();
1052 #endif
1053 #ifdef CONFIG_TCP_MD5SIG
1054 	if (sk && sk_fullsock(sk)) {
1055 		int l3index;
1056 
1057 		/* If sdif is set, the packet ingressed via a device
1058 		 * in an L3 domain and inet_iif is set to it.
1059 		 */
1060 		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1061 		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1062 		if (key.md5_key)
1063 			key.type = TCP_KEY_MD5;
1064 	} else if (md5_hash_location) {
1065 		int dif = tcp_v6_iif_l3_slave(skb);
1066 		int sdif = tcp_v6_sdif(skb);
1067 		int l3index;
1068 
1069 		/*
1070 		 * The active side is lost. Try to find the listening socket
1071 		 * through the source port, and then find the md5 key through
1072 		 * the listening socket. We do not lose security here:
1073 		 * the incoming packet is checked with the md5 hash of the key
1074 		 * we find, and no RST is generated if the md5 hash doesn't match.
1075 		 */
1076 		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1077 					    NULL, 0, &ipv6h->saddr, th->source,
1078 					    &ipv6h->daddr, ntohs(th->source),
1079 					    dif, sdif);
1080 		if (!sk1)
1081 			goto out;
1082 
1083 		/* If sdif is set, the packet ingressed via a device
1084 		 * in an L3 domain and dif is set to it.
1085 		 */
1086 		l3index = tcp_v6_sdif(skb) ? dif : 0;
1087 
1088 		key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1089 		if (!key.md5_key)
1090 			goto out;
1091 		key.type = TCP_KEY_MD5;
1092 
1093 		genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
1094 		if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
1095 			goto out;
1096 	}
1097 #endif
1098 
1099 	if (th->ack)
1100 		seq = ntohl(th->ack_seq);
1101 	else
1102 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1103 			  (th->doff << 2);
1104 
1105 #ifdef CONFIG_TCP_AO
1106 	if (aoh) {
1107 		int l3index;
1108 
1109 		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1110 		if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
1111 					 &key.ao_key, &key.traffic_key,
1112 					 &allocated_traffic_key,
1113 					 &key.rcv_next, &key.sne))
1114 			goto out;
1115 		key.type = TCP_KEY_AO;
1116 	}
1117 #endif
1118 
1119 	if (sk) {
1120 		oif = sk->sk_bound_dev_if;
1121 		if (sk_fullsock(sk)) {
1122 			if (inet6_test_bit(REPFLOW, sk))
1123 				label = ip6_flowlabel(ipv6h);
1124 			priority = READ_ONCE(sk->sk_priority);
1125 			txhash = sk->sk_txhash;
1126 		}
1127 		if (sk->sk_state == TCP_TIME_WAIT) {
1128 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1129 			priority = inet_twsk(sk)->tw_priority;
1130 			txhash = inet_twsk(sk)->tw_txhash;
1131 		}
1132 	} else {
1133 		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1134 			label = ip6_flowlabel(ipv6h);
1135 	}
1136 
1137 	trace_tcp_send_reset(sk, skb, reason);
1138 
1139 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
1140 			     ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK,
1141 			     label, priority, txhash,
1142 			     &key);
1143 
1144 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1145 out:
1146 	if (allocated_traffic_key)
1147 		kfree(key.traffic_key);
1148 	rcu_read_unlock();
1149 #endif
1150 }
1151 
1152 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1153 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1154 			    struct tcp_key *key, u8 tclass,
1155 			    __be32 label, u32 priority, u32 txhash)
1156 {
1157 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
1158 			     tclass, label, priority, txhash, key);
1159 }
1160 
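/* ACK a segment received for a TIME-WAIT socket, signing it with TCP-AO or
 * MD5 when keys are configured, then drop the timewait reference.
 */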
1161 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb,
1162 				enum tcp_tw_status tw_status)
1163 {
1164 	struct inet_timewait_sock *tw = inet_twsk(sk);
1165 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1166 	u8 tclass = tw->tw_tclass;
1167 	struct tcp_key key = {};
1168 
1169 	if (tw_status == TCP_TW_ACK_OOW)
1170 		tclass &= ~INET_ECN_MASK;
1171 #ifdef CONFIG_TCP_AO
1172 	struct tcp_ao_info *ao_info;
1173 
1174 	if (static_branch_unlikely(&tcp_ao_needed.key)) {
1175 
1176 		/* FIXME: the segment to-be-acked is not verified yet */
1177 		ao_info = rcu_dereference(tcptw->ao_info);
1178 		if (ao_info) {
1179 			const struct tcp_ao_hdr *aoh;
1180 
1181 			/* Invalid TCP option size or twice included auth */
1182 			if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1183 				goto out;
1184 			if (aoh)
1185 				key.ao_key = tcp_ao_established_key(sk, ao_info,
1186 								    aoh->rnext_keyid, -1);
1187 		}
1188 	}
1189 	if (key.ao_key) {
1190 		struct tcp_ao_key *rnext_key;
1191 
1192 		key.traffic_key = snd_other_key(key.ao_key);
1193 		/* rcv_next switches to our rcv_next */
1194 		rnext_key = READ_ONCE(ao_info->rnext_key);
1195 		key.rcv_next = rnext_key->rcvid;
1196 		key.sne = READ_ONCE(ao_info->snd_sne);
1197 		key.type = TCP_KEY_AO;
1198 #else
1199 	if (0) {
1200 #endif
1201 #ifdef CONFIG_TCP_MD5SIG
1202 	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1203 		key.md5_key = tcp_twsk_md5_key(tcptw);
1204 		if (key.md5_key)
1205 			key.type = TCP_KEY_MD5;
1206 #endif
1207 	}
1208 
1209 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt,
1210 			READ_ONCE(tcptw->tw_rcv_nxt),
1211 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1212 			tcp_tw_tsval(tcptw),
1213 			READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
1214 			&key, tclass, cpu_to_be32(tw->tw_flowlabel),
1215 			tw->tw_priority, tw->tw_txhash);
1216 
1217 #ifdef CONFIG_TCP_AO
1218 out:
1219 #endif
1220 	inet_twsk_put(tw);
1221 }
1222 
1223 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1224 				  struct request_sock *req)
1225 {
1226 	struct tcp_key key = {};
1227 
1228 #ifdef CONFIG_TCP_AO
1229 	if (static_branch_unlikely(&tcp_ao_needed.key) &&
1230 	    tcp_rsk_used_ao(req)) {
1231 		const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
1232 		const struct tcp_ao_hdr *aoh;
1233 		int l3index;
1234 
1235 		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1236 		/* Invalid TCP option size or twice included auth */
1237 		if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1238 			return;
1239 		if (!aoh)
1240 			return;
1241 		key.ao_key = tcp_ao_do_lookup(sk, l3index,
1242 					      (union tcp_ao_addr *)addr,
1243 					      AF_INET6, aoh->rnext_keyid, -1);
1244 		if (unlikely(!key.ao_key)) {
1245 			/* Send ACK with any matching MKT for the peer */
1246 			key.ao_key = tcp_ao_do_lookup(sk, l3index,
1247 						      (union tcp_ao_addr *)addr,
1248 						      AF_INET6, -1, -1);
1249 			/* Matching key disappeared (user removed the key?)
1250 			 * so let the handshake time out.
1251 			 */
1252 			if (!key.ao_key) {
1253 				net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
1254 						     addr,
1255 						     ntohs(tcp_hdr(skb)->source),
1256 						     &ipv6_hdr(skb)->daddr,
1257 						     ntohs(tcp_hdr(skb)->dest));
1258 				return;
1259 			}
1260 		}
1261 		key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
1262 		if (!key.traffic_key)
1263 			return;
1264 
1265 		key.type = TCP_KEY_AO;
1266 		key.rcv_next = aoh->keyid;
1267 		tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
1268 #else
1269 	if (0) {
1270 #endif
1271 #ifdef CONFIG_TCP_MD5SIG
1272 	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1273 		int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1274 
1275 		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
1276 						   l3index);
1277 		if (key.md5_key)
1278 			key.type = TCP_KEY_MD5;
1279 #endif
1280 	}
1281 
1282 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1283 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1284 	 */
1285 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1286 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1287 			tcp_rsk(req)->rcv_nxt,
1288 			tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
1289 			tcp_rsk_tsval(tcp_rsk(req)),
1290 			req->ts_recent, sk->sk_bound_dev_if,
1291 			&key, ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK,
1292 			0,
1293 			READ_ONCE(sk->sk_priority),
1294 			READ_ONCE(tcp_rsk(req)->txhash));
1295 	if (tcp_key_is_ao(&key))
1296 		kfree(key.traffic_key);
1297 }
1298 
1299 
1300 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
1301 {
1302 #ifdef CONFIG_SYN_COOKIES
1303 	const struct tcphdr *th = tcp_hdr(skb);
1304 
1305 	if (!th->syn)
1306 		sk = cookie_v6_check(sk, skb);
1307 #endif
1308 	return sk;
1309 }
1310 
1311 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1312 			 struct tcphdr *th, u32 *cookie)
1313 {
1314 	u16 mss = 0;
1315 #ifdef CONFIG_SYN_COOKIES
1316 	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1317 				    &tcp_request_sock_ipv6_ops, sk, th);
1318 	if (mss) {
1319 		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
1320 		tcp_synq_overflow(sk);
1321 	}
1322 #endif
1323 	return mss;
1324 }
1325 
1326 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1327 {
1328 	if (skb->protocol == htons(ETH_P_IP))
1329 		return tcp_v4_conn_request(sk, skb);
1330 
1331 	if (!ipv6_unicast_destination(skb))
1332 		goto drop;
1333 
1334 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1335 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1336 		return 0;
1337 	}
1338 
1339 	return tcp_conn_request(&tcp6_request_sock_ops,
1340 				&tcp_request_sock_ipv6_ops, sk, skb);
1341 
1342 drop:
1343 	tcp_listendrop(sk);
1344 	return 0; /* don't send reset */
1345 }
1346 
1347 static void tcp_v6_restore_cb(struct sk_buff *skb)
1348 {
1349 	/* We need to move header back to the beginning if xfrm6_policy_check()
1350 	 * and tcp_v6_fill_cb() are going to be called again.
1351 	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1352 	 */
1353 	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1354 		sizeof(struct inet6_skb_parm));
1355 }
1356 
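/* Create the child socket once the handshake completes. A SYN that arrived
 * over IPv4 (v4-mapped) is delegated to tcp_v4_syn_recv_sock() and the
 * result is patched up to look like an IPv6 socket.
 */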
1357 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1358 					 struct request_sock *req,
1359 					 struct dst_entry *dst,
1360 					 struct request_sock *req_unhash,
1361 					 bool *own_req)
1362 {
1363 	struct inet_request_sock *ireq;
1364 	struct ipv6_pinfo *newnp;
1365 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1366 	struct ipv6_txoptions *opt;
1367 	struct inet_sock *newinet;
1368 	bool found_dup_sk = false;
1369 	struct tcp_sock *newtp;
1370 	struct sock *newsk;
1371 #ifdef CONFIG_TCP_MD5SIG
1372 	struct tcp_md5sig_key *key;
1373 	int l3index;
1374 #endif
1375 	struct flowi6 fl6;
1376 
1377 	if (skb->protocol == htons(ETH_P_IP)) {
1378 		/*
1379 		 *	v6 mapped
1380 		 */
1381 
1382 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1383 					     req_unhash, own_req);
1384 
1385 		if (!newsk)
1386 			return NULL;
1387 
1388 		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1389 
1390 		newnp = tcp_inet6_sk(newsk);
1391 		newtp = tcp_sk(newsk);
1392 
1393 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1394 
1395 		newnp->saddr = newsk->sk_v6_rcv_saddr;
1396 
1397 		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1398 		if (sk_is_mptcp(newsk))
1399 			mptcpv6_handle_mapped(newsk, true);
1400 		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1401 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
1402 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1403 #endif
1404 
1405 		newnp->ipv6_mc_list = NULL;
1406 		newnp->ipv6_ac_list = NULL;
1407 		newnp->ipv6_fl_list = NULL;
1408 		newnp->pktoptions  = NULL;
1409 		newnp->opt	   = NULL;
1410 		newnp->mcast_oif   = inet_iif(skb);
1411 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
1412 		newnp->rcv_flowinfo = 0;
1413 		if (inet6_test_bit(REPFLOW, sk))
1414 			newnp->flow_label = 0;
1415 
1416 		/*
1417 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1418 		 * here, tcp_create_openreq_child now does this for us, see the comment in
1419 		 * that function for the gory details. -acme
1420 		 */
1421 
1422 		/* It is a tricky place. Until this moment IPv4 TCP
1423 		   worked with IPv6 icsk.icsk_af_ops.
1424 		   Sync it now.
1425 		 */
1426 		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1427 
1428 		return newsk;
1429 	}
1430 
1431 	ireq = inet_rsk(req);
1432 
1433 	if (sk_acceptq_is_full(sk))
1434 		goto out_overflow;
1435 
1436 	if (!dst) {
1437 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1438 		if (!dst)
1439 			goto out;
1440 	}
1441 
1442 	newsk = tcp_create_openreq_child(sk, req, skb);
1443 	if (!newsk)
1444 		goto out_nonewsk;
1445 
1446 	/*
1447 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1448 	 * count here, tcp_create_openreq_child now does this for us, see the
1449 	 * comment in that function for the gory details. -acme
1450 	 */
1451 
1452 	newsk->sk_gso_type = SKB_GSO_TCPV6;
1453 	inet6_sk_rx_dst_set(newsk, skb);
1454 
1455 	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
1456 
1457 	newtp = tcp_sk(newsk);
1458 	newinet = inet_sk(newsk);
1459 	newnp = tcp_inet6_sk(newsk);
1460 
1461 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1462 
1463 	ip6_dst_store(newsk, dst, NULL, NULL);
1464 
1465 	newnp->saddr = ireq->ir_v6_loc_addr;
1466 
1467 	/* Now IPv6 options...
1468 
1469 	   First: no IPv4 options.
1470 	 */
1471 	newinet->inet_opt = NULL;
1472 	newnp->ipv6_mc_list = NULL;
1473 	newnp->ipv6_ac_list = NULL;
1474 	newnp->ipv6_fl_list = NULL;
1475 
1476 	/* Clone RX bits */
1477 	newnp->rxopt.all = np->rxopt.all;
1478 
1479 	newnp->pktoptions = NULL;
1480 	newnp->opt	  = NULL;
1481 	newnp->mcast_oif  = tcp_v6_iif(skb);
1482 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1483 	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1484 	if (inet6_test_bit(REPFLOW, sk))
1485 		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1486 
1487 	/* Set ToS of the new socket based upon the value of incoming SYN.
1488 	 * ECT bits are set later in tcp_init_transfer().
1489 	 */
1490 	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1491 		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1492 
1493 	/* Clone native IPv6 options from listening socket (if any)
1494 
1495 	   Yes, keeping reference count would be much more clever,
1496 	   but we do one more thing here: reattach optmem
1497 	   to newsk.
1498 	 */
1499 	opt = ireq->ipv6_opt;
1500 	if (!opt)
1501 		opt = rcu_dereference(np->opt);
1502 	if (opt) {
1503 		opt = ipv6_dup_options(newsk, opt);
1504 		RCU_INIT_POINTER(newnp->opt, opt);
1505 	}
1506 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1507 	if (opt)
1508 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1509 						    opt->opt_flen;
1510 
1511 	tcp_ca_openreq_child(newsk, dst);
1512 
1513 	tcp_sync_mss(newsk, dst_mtu(dst));
1514 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1515 
1516 	tcp_initialize_rcv_mss(newsk);
1517 
1518 #ifdef CONFIG_TCP_MD5SIG
1519 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1520 
1521 	if (!tcp_rsk_used_ao(req)) {
1522 		/* Copy over the MD5 key from the original socket */
1523 		key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1524 		if (key) {
1525 			const union tcp_md5_addr *addr;
1526 
1527 			addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
1528 			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
1529 				inet_csk_prepare_forced_close(newsk);
1530 				tcp_done(newsk);
1531 				goto out;
1532 			}
1533 		}
1534 	}
1535 #endif
1536 #ifdef CONFIG_TCP_AO
1537 	/* Copy over tcp_ao_info if any */
1538 	if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
1539 		goto out; /* OOM */
1540 #endif
1541 
1542 	if (__inet_inherit_port(sk, newsk) < 0) {
1543 		inet_csk_prepare_forced_close(newsk);
1544 		tcp_done(newsk);
1545 		goto out;
1546 	}
1547 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1548 				       &found_dup_sk);
1549 	if (*own_req) {
1550 		tcp_move_syn(newtp, req);
1551 
1552 		/* Clone pktoptions received with SYN, if we own the req */
1553 		if (ireq->pktopts) {
1554 			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
1555 			consume_skb(ireq->pktopts);
1556 			ireq->pktopts = NULL;
1557 			if (newnp->pktoptions)
1558 				tcp_v6_restore_cb(newnp->pktoptions);
1559 		}
1560 	} else {
1561 		if (!req_unhash && found_dup_sk) {
1562 			/* This code path should only be executed in the
1563 			 * syncookie case
1564 			 */
1565 			bh_unlock_sock(newsk);
1566 			sock_put(newsk);
1567 			newsk = NULL;
1568 		}
1569 	}
1570 
1571 	return newsk;
1572 
1573 out_overflow:
1574 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1575 out_nonewsk:
1576 	dst_release(dst);
1577 out:
1578 	tcp_listendrop(sk);
1579 	return NULL;
1580 }
1581 
1582 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1583 							   u32));
1584 /* The socket must have its spinlock held when we get
1585  * here, unless it is a TCP_LISTEN socket.
1586  *
1587  * We have a potential double-lock case here, so even when
1588  * doing backlog processing we use the BH locking scheme.
1589  * This is because we cannot sleep with the original spinlock
1590  * held.
1591  */
1592 INDIRECT_CALLABLE_SCOPE
1593 int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1594 {
1595 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1596 	struct sk_buff *opt_skb = NULL;
1597 	enum skb_drop_reason reason;
1598 	struct tcp_sock *tp;
1599 
1600 	/* Imagine: socket is IPv6. IPv4 packet arrives,
1601 	   goes to the IPv4 receive handler and is backlogged.
1602 	   From backlog it always goes here. Kerboom...
1603 	   Fortunately, tcp_rcv_established and rcv_established
1604 	   handle them correctly, but it is not the case with
1605 	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1606 	 */
1607 
1608 	if (skb->protocol == htons(ETH_P_IP))
1609 		return tcp_v4_do_rcv(sk, skb);
1610 
1611 	/*
1612 	 *	socket locking is here for SMP purposes as backlog rcv
1613 	 *	is currently called with bh processing disabled.
1614 	 */
1615 
1616 	/* Do Stevens' IPV6_PKTOPTIONS.
1617 
1618 	   Yes, guys, it is the only place in our code where we
1619 	   can do this without affecting IPv4.
1620 	   The rest of the code is protocol independent,
1621 	   and I do not like the idea of uglifying IPv4.
1622 
1623 	   Actually, the whole idea behind IPV6_PKTOPTIONS
1624 	   does not look very well thought out. For now we latch
1625 	   the options received in the last packet enqueued
1626 	   by tcp. Feel free to propose a better solution.
1627 					       --ANK (980728)
1628 	 */
1629 	if (np->rxopt.all && sk->sk_state != TCP_LISTEN)
1630 		opt_skb = skb_clone_and_charge_r(skb, sk);
1631 
1632 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1633 		struct dst_entry *dst;
1634 
1635 		dst = rcu_dereference_protected(sk->sk_rx_dst,
1636 						lockdep_sock_is_held(sk));
1637 
1638 		sock_rps_save_rxhash(sk, skb);
1639 		sk_mark_napi_id(sk, skb);
1640 		if (dst) {
1641 			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1642 			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1643 					    dst, sk->sk_rx_dst_cookie) == NULL) {
1644 				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1645 				dst_release(dst);
1646 			}
1647 		}
1648 
1649 		tcp_rcv_established(sk, skb);
1650 		if (opt_skb)
1651 			goto ipv6_pktoptions;
1652 		return 0;
1653 	}
1654 
1655 	if (tcp_checksum_complete(skb))
1656 		goto csum_err;
1657 
1658 	if (sk->sk_state == TCP_LISTEN) {
1659 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1660 
1661 		if (nsk != sk) {
1662 			if (nsk) {
1663 				reason = tcp_child_process(sk, nsk, skb);
1664 				if (reason)
1665 					goto reset;
1666 			}
1667 			return 0;
1668 		}
1669 	} else
1670 		sock_rps_save_rxhash(sk, skb);
1671 
1672 	reason = tcp_rcv_state_process(sk, skb);
1673 	if (reason)
1674 		goto reset;
1675 	if (opt_skb)
1676 		goto ipv6_pktoptions;
1677 	return 0;
1678 
1679 reset:
1680 	tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason));
1681 discard:
1682 	if (opt_skb)
1683 		__kfree_skb(opt_skb);
1684 	sk_skb_reason_drop(sk, skb, reason);
1685 	return 0;
1686 csum_err:
1687 	reason = SKB_DROP_REASON_TCP_CSUM;
1688 	trace_tcp_bad_csum(skb);
1689 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1690 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1691 	goto discard;
1692 
1693 
1694 ipv6_pktoptions:
1695 	/* Do you ask, what is it?
1696 
1697 	   1. skb was enqueued by tcp.
1698 	   2. skb is added to tail of read queue, rather than out of order.
1699 	   3. socket is not in passive state.
1700 	   4. Finally, it really contains options, which user wants to receive.
1701 	 */
1702 	tp = tcp_sk(sk);
1703 	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1704 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1705 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1706 			WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb));
1707 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1708 			WRITE_ONCE(np->mcast_hops,
1709 				   ipv6_hdr(opt_skb)->hop_limit);
1710 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1711 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1712 		if (inet6_test_bit(REPFLOW, sk))
1713 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1714 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1715 			tcp_v6_restore_cb(opt_skb);
1716 			opt_skb = xchg(&np->pktoptions, opt_skb);
1717 		} else {
1718 			__kfree_skb(opt_skb);
1719 			opt_skb = xchg(&np->pktoptions, NULL);
1720 		}
1721 	}
1722 
1723 	consume_skb(opt_skb);
1724 	return 0;
1725 }
1726 
1727 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1728 			   const struct tcphdr *th)
1729 {
1730 	/* This is tricky: we move IP6CB at its correct location into
1731 	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1732 	 * _decode_session6() uses IP6CB().
1733 	 * barrier() makes sure compiler won't play aliasing games.
1734 	 */
1735 	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1736 		sizeof(struct inet6_skb_parm));
1737 	barrier();
1738 
1739 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1740 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1741 				    skb->len - th->doff*4);
1742 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1743 	TCP_SKB_CB(skb)->tcp_flags = tcp_flags_ntohs(th);
1744 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1745 	TCP_SKB_CB(skb)->sacked = 0;
1746 	TCP_SKB_CB(skb)->has_rxtstamp =
1747 			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1748 }
1749 
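/* Main IPv6 TCP receive path: validate and checksum the segment, look up
 * the owning socket (established, timewait, request or listener), then
 * either process it directly or queue it to the socket backlog.
 */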
1750 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
1751 {
1752 	struct net *net = dev_net_rcu(skb->dev);
1753 	enum skb_drop_reason drop_reason;
1754 	enum tcp_tw_status tw_status;
1755 	int sdif = inet6_sdif(skb);
1756 	int dif = inet6_iif(skb);
1757 	const struct tcphdr *th;
1758 	const struct ipv6hdr *hdr;
1759 	struct sock *sk = NULL;
1760 	bool refcounted;
1761 	int ret;
1762 	u32 isn;
1763 
1764 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1765 	if (skb->pkt_type != PACKET_HOST)
1766 		goto discard_it;
1767 
1768 	/*
1769 	 *	Count it even if it's bad.
1770 	 */
1771 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1772 
1773 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1774 		goto discard_it;
1775 
1776 	th = (const struct tcphdr *)skb->data;
1777 
1778 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
1779 		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1780 		goto bad_packet;
1781 	}
1782 	if (!pskb_may_pull(skb, th->doff*4))
1783 		goto discard_it;
1784 
1785 	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1786 		goto csum_error;
1787 
1788 	th = (const struct tcphdr *)skb->data;
1789 	hdr = ipv6_hdr(skb);
1790 
1791 lookup:
1792 	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
1793 				th->source, th->dest, inet6_iif(skb), sdif,
1794 				&refcounted);
1795 	if (!sk)
1796 		goto no_tcp_socket;
1797 
1798 	if (sk->sk_state == TCP_TIME_WAIT)
1799 		goto do_time_wait;
1800 
1801 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1802 		struct request_sock *req = inet_reqsk(sk);
1803 		bool req_stolen = false;
1804 		struct sock *nsk;
1805 
1806 		sk = req->rsk_listener;
1807 		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1808 			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1809 		else
1810 			drop_reason = tcp_inbound_hash(sk, req, skb,
1811 						       &hdr->saddr, &hdr->daddr,
1812 						       AF_INET6, dif, sdif);
1813 		if (drop_reason) {
1814 			sk_drops_add(sk, skb);
1815 			reqsk_put(req);
1816 			goto discard_it;
1817 		}
1818 		if (tcp_checksum_complete(skb)) {
1819 			reqsk_put(req);
1820 			goto csum_error;
1821 		}
1822 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1823 			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1824 			if (!nsk) {
1825 				inet_csk_reqsk_queue_drop_and_put(sk, req);
1826 				goto lookup;
1827 			}
1828 			sk = nsk;
1829 			/* reuseport_migrate_sock() already holds one sk_refcnt
1830 			 * on the returned socket.
1831 			 */
1832 		} else {
1833 			sock_hold(sk);
1834 		}
1835 		refcounted = true;
1836 		nsk = NULL;
1837 		if (!tcp_filter(sk, skb)) {
1838 			th = (const struct tcphdr *)skb->data;
1839 			hdr = ipv6_hdr(skb);
1840 			tcp_v6_fill_cb(skb, hdr, th);
1841 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen,
1842 					    &drop_reason);
1843 		} else {
1844 			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1845 		}
1846 		if (!nsk) {
1847 			reqsk_put(req);
1848 			if (req_stolen) {
1849 				/* Another CPU got exclusive access to req
1850 				 * and created a full-blown socket.
1851 				 * Try to feed this packet to that socket
1852 				 * instead of discarding it.
1853 				 */
1854 				tcp_v6_restore_cb(skb);
1855 				sock_put(sk);
1856 				goto lookup;
1857 			}
1858 			goto discard_and_relse;
1859 		}
1860 		nf_reset_ct(skb);
1861 		if (nsk == sk) {
1862 			reqsk_put(req);
1863 			tcp_v6_restore_cb(skb);
1864 		} else {
1865 			drop_reason = tcp_child_process(sk, nsk, skb);
1866 			if (drop_reason) {
1867 				enum sk_rst_reason rst_reason;
1868 
1869 				rst_reason = sk_rst_convert_drop_reason(drop_reason);
1870 				tcp_v6_send_reset(nsk, skb, rst_reason);
1871 				goto discard_and_relse;
1872 			}
1873 			sock_put(sk);
1874 			return 0;
1875 		}
1876 	}
1877 
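	/* Common path for established and listening sockets (also reached from
	 * do_time_wait when a new SYN revives a TIME_WAIT connection): enforce
	 * the socket's minimum hop limit and IPsec policy, verify any TCP
	 * MD5/AO signature via tcp_inbound_hash(), run socket filters, then
	 * process the segment directly or defer it to the socket backlog.
	 */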
1878 process:
1879 	if (static_branch_unlikely(&ip6_min_hopcount)) {
1880 		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1881 		if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
1882 			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1883 			drop_reason = SKB_DROP_REASON_TCP_MINTTL;
1884 			goto discard_and_relse;
1885 		}
1886 	}
1887 
1888 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
1889 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1890 		goto discard_and_relse;
1891 	}
1892 
1893 	drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
1894 				       AF_INET6, dif, sdif);
1895 	if (drop_reason)
1896 		goto discard_and_relse;
1897 
1898 	nf_reset_ct(skb);
1899 
1900 	if (tcp_filter(sk, skb)) {
1901 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1902 		goto discard_and_relse;
1903 	}
1904 	th = (const struct tcphdr *)skb->data;
1905 	hdr = ipv6_hdr(skb);
1906 	tcp_v6_fill_cb(skb, hdr, th);
1907 
1908 	skb->dev = NULL;
1909 
1910 	if (sk->sk_state == TCP_LISTEN) {
1911 		ret = tcp_v6_do_rcv(sk, skb);
1912 		goto put_and_return;
1913 	}
1914 
1915 	sk_incoming_cpu_update(sk);
1916 
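	/* If a process currently owns the socket (it is inside a system call
	 * on it), we must not touch its state from softirq context;
	 * tcp_add_backlog() queues the skb so that the owner processes it
	 * when it releases the socket lock.
	 */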
1917 	bh_lock_sock_nested(sk);
1918 	tcp_segs_in(tcp_sk(sk), skb);
1919 	ret = 0;
1920 	if (!sock_owned_by_user(sk)) {
1921 		ret = tcp_v6_do_rcv(sk, skb);
1922 	} else {
1923 		if (tcp_add_backlog(sk, skb, &drop_reason))
1924 			goto discard_and_relse;
1925 	}
1926 	bh_unlock_sock(sk);
1927 put_and_return:
1928 	if (refcounted)
1929 		sock_put(sk);
1930 	return ret ? -1 : 0;
1931 
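	/* No socket owns this segment.  If IPsec policy allows and the
	 * checksum is valid, answer with a RST; otherwise just update the
	 * error counters and drop.
	 */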
1932 no_tcp_socket:
1933 	drop_reason = SKB_DROP_REASON_NO_SOCKET;
1934 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1935 		goto discard_it;
1936 
1937 	tcp_v6_fill_cb(skb, hdr, th);
1938 
1939 	if (tcp_checksum_complete(skb)) {
1940 csum_error:
1941 		drop_reason = SKB_DROP_REASON_TCP_CSUM;
1942 		trace_tcp_bad_csum(skb);
1943 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1944 bad_packet:
1945 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1946 	} else {
1947 		tcp_v6_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason));
1948 	}
1949 
1950 discard_it:
1951 	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
1952 	sk_skb_reason_drop(sk, skb, drop_reason);
1953 	return 0;
1954 
1955 discard_and_relse:
1956 	sk_drops_add(sk, skb);
1957 	if (refcounted)
1958 		sock_put(sk);
1959 	goto discard_it;
1960 
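	/* The lookup hit a TIME_WAIT minisock.  After revalidating the segment,
	 * tcp_timewait_state_process() decides whether to accept a new SYN for
	 * this port pair, answer with an ACK or a RST, or drop silently.
	 */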
1961 do_time_wait:
1962 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1963 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
1964 		inet_twsk_put(inet_twsk(sk));
1965 		goto discard_it;
1966 	}
1967 
1968 	tcp_v6_fill_cb(skb, hdr, th);
1969 
1970 	if (tcp_checksum_complete(skb)) {
1971 		inet_twsk_put(inet_twsk(sk));
1972 		goto csum_error;
1973 	}
1974 
1975 	tw_status = tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn,
1976 					       &drop_reason);
1977 	switch (tw_status) {
1978 	case TCP_TW_SYN:
1979 	{
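		/* An acceptable new SYN arrived while in TIME_WAIT: look for a
		 * matching listener, kill the TIME_WAIT socket and restart
		 * processing as if this were a fresh lookup.
		 */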
1980 		struct sock *sk2;
1981 
1982 		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
1983 					    skb, __tcp_hdrlen(th),
1984 					    &ipv6_hdr(skb)->saddr, th->source,
1985 					    &ipv6_hdr(skb)->daddr,
1986 					    ntohs(th->dest),
1987 					    tcp_v6_iif_l3_slave(skb),
1988 					    sdif);
1989 		if (sk2) {
1990 			struct inet_timewait_sock *tw = inet_twsk(sk);
1991 			inet_twsk_deschedule_put(tw);
1992 			sk = sk2;
1993 			tcp_v6_restore_cb(skb);
1994 			refcounted = false;
1995 			__this_cpu_write(tcp_tw_isn, isn);
1996 			goto process;
1997 		}
1998 	}
1999 		/* to ACK */
2000 		fallthrough;
2001 	case TCP_TW_ACK:
2002 	case TCP_TW_ACK_OOW:
2003 		tcp_v6_timewait_ack(sk, skb, tw_status);
2004 		break;
2005 	case TCP_TW_RST:
2006 		tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
2007 		inet_twsk_deschedule_put(inet_twsk(sk));
2008 		goto discard_it;
2009 	case TCP_TW_SUCCESS:
2010 		;
2011 	}
2012 	goto discard_it;
2013 }
2014 
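/* Early demultiplex: called from the IPv6 input path before routing.  If an
 * established socket matches, attach it to the skb and reuse its cached
 * receive dst entry, saving a full route lookup for this packet.
 */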
2015 void tcp_v6_early_demux(struct sk_buff *skb)
2016 {
2017 	struct net *net = dev_net_rcu(skb->dev);
2018 	const struct ipv6hdr *hdr;
2019 	const struct tcphdr *th;
2020 	struct sock *sk;
2021 
2022 	if (skb->pkt_type != PACKET_HOST)
2023 		return;
2024 
2025 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
2026 		return;
2027 
2028 	hdr = ipv6_hdr(skb);
2029 	th = tcp_hdr(skb);
2030 
2031 	if (th->doff < sizeof(struct tcphdr) / 4)
2032 		return;
2033 
2034 	/* Note: We use inet6_iif() here, not tcp_v6_iif() */
2035 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
2036 					&hdr->saddr, th->source,
2037 					&hdr->daddr, ntohs(th->dest),
2038 					inet6_iif(skb), inet6_sdif(skb));
2039 	if (sk) {
2040 		skb->sk = sk;
2041 		skb->destructor = sock_edemux;
2042 		if (sk_fullsock(sk)) {
2043 			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
2044 
2045 			if (dst)
2046 				dst = dst_check(dst, sk->sk_rx_dst_cookie);
2047 			if (dst &&
2048 			    sk->sk_rx_dst_ifindex == skb->skb_iif)
2049 				skb_dst_set_noref(skb, dst);
2050 		}
2051 	}
2052 }
2053 
2054 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
2055 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
2056 	.twsk_destructor = tcp_twsk_destructor,
2057 };
2058 
2059 INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
2060 {
2061 	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
2062 }
2063 
2064 const struct inet_connection_sock_af_ops ipv6_specific = {
2065 	.queue_xmit	   = inet6_csk_xmit,
2066 	.send_check	   = tcp_v6_send_check,
2067 	.rebuild_header	   = inet6_sk_rebuild_header,
2068 	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
2069 	.conn_request	   = tcp_v6_conn_request,
2070 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2071 	.net_header_len	   = sizeof(struct ipv6hdr),
2072 	.setsockopt	   = ipv6_setsockopt,
2073 	.getsockopt	   = ipv6_getsockopt,
2074 	.mtu_reduced	   = tcp_v6_mtu_reduced,
2075 };
2076 
2077 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2078 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
2079 #ifdef CONFIG_TCP_MD5SIG
2080 	.md5_lookup	=	tcp_v6_md5_lookup,
2081 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
2082 	.md5_parse	=	tcp_v6_parse_md5_keys,
2083 #endif
2084 #ifdef CONFIG_TCP_AO
2085 	.ao_lookup	=	tcp_v6_ao_lookup,
2086 	.calc_ao_hash	=	tcp_v6_ao_hash_skb,
2087 	.ao_parse	=	tcp_v6_parse_ao,
2088 	.ao_calc_key_sk	=	tcp_v6_ao_calc_key_sk,
2089 #endif
2090 };
2091 #endif
2092 
2093 /*
2094  *	TCP over IPv4 via INET6 API (v4-mapped addresses, ::ffff:a.b.c.d)
2095  */
2096 static const struct inet_connection_sock_af_ops ipv6_mapped = {
2097 	.queue_xmit	   = ip_queue_xmit,
2098 	.send_check	   = tcp_v4_send_check,
2099 	.rebuild_header	   = inet_sk_rebuild_header,
2100 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2101 	.conn_request	   = tcp_v6_conn_request,
2102 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
2103 	.net_header_len	   = sizeof(struct iphdr),
2104 	.setsockopt	   = ipv6_setsockopt,
2105 	.getsockopt	   = ipv6_getsockopt,
2106 	.mtu_reduced	   = tcp_v4_mtu_reduced,
2107 };
2108 
2109 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2110 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
2111 #ifdef CONFIG_TCP_MD5SIG
2112 	.md5_lookup	=	tcp_v4_md5_lookup,
2113 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
2114 	.md5_parse	=	tcp_v6_parse_md5_keys,
2115 #endif
2116 #ifdef CONFIG_TCP_AO
2117 	.ao_lookup	=	tcp_v6_ao_lookup,
2118 	.calc_ao_hash	=	tcp_v4_ao_hash_skb,
2119 	.ao_parse	=	tcp_v6_parse_ao,
2120 	.ao_calc_key_sk	=	tcp_v4_ao_calc_key_sk,
2121 #endif
2122 };
2123 #endif
2124 
2125 /* NOTE: A lot of things are set to zero explicitly by the call to
2126  *       sk_alloc(), so they need not be done here.
2127  */
2128 static int tcp_v6_init_sock(struct sock *sk)
2129 {
2130 	struct inet_connection_sock *icsk = inet_csk(sk);
2131 
2132 	tcp_init_sock(sk);
2133 
2134 	icsk->icsk_af_ops = &ipv6_specific;
2135 
2136 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2137 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
2138 #endif
2139 
2140 	return 0;
2141 }
2142 
2143 #ifdef CONFIG_PROC_FS
2144 /* Proc filesystem TCPv6 sock list dumping. */
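/* One /proc/net/tcp6 line for a request socket: a connection still in
 * SYN_RECV, owned by a listener.  Queue sizes and the inode have no meaning
 * at this stage and are printed as constants.
 */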
2145 static void get_openreq6(struct seq_file *seq,
2146 			 const struct request_sock *req, int i)
2147 {
2148 	long ttd = req->rsk_timer.expires - jiffies;
2149 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
2150 	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
2151 
2152 	if (ttd < 0)
2153 		ttd = 0;
2154 
2155 	seq_printf(seq,
2156 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2157 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
2158 		   i,
2159 		   src->s6_addr32[0], src->s6_addr32[1],
2160 		   src->s6_addr32[2], src->s6_addr32[3],
2161 		   inet_rsk(req)->ir_num,
2162 		   dest->s6_addr32[0], dest->s6_addr32[1],
2163 		   dest->s6_addr32[2], dest->s6_addr32[3],
2164 		   ntohs(inet_rsk(req)->ir_rmt_port),
2165 		   TCP_SYN_RECV,
2166 		   0, 0, /* could print option size, but that is af dependent. */
2167 		   1,   /* timers active (only the expire timer) */
2168 		   jiffies_to_clock_t(ttd),
2169 		   req->num_timeout,
2170 		   from_kuid_munged(seq_user_ns(seq),
2171 				    sock_i_uid(req->rsk_listener)),
2172 		   0,  /* non standard timer */
2173 		   0, /* open_requests have no inode */
2174 		   0, req);
2175 }
2176 
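/* One /proc/net/tcp6 line for a full socket: hex addresses and ports, state,
 * tx/rx queue sizes, pending timer and its expiry, retransmit and probe
 * counters, uid, inode, followed by RTO, delayed-ACK and congestion window
 * details.
 */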
2177 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2178 {
2179 	const struct in6_addr *dest, *src;
2180 	__u16 destp, srcp;
2181 	int timer_active;
2182 	unsigned long timer_expires;
2183 	const struct inet_sock *inet = inet_sk(sp);
2184 	const struct tcp_sock *tp = tcp_sk(sp);
2185 	const struct inet_connection_sock *icsk = inet_csk(sp);
2186 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2187 	u8 icsk_pending;
2188 	int rx_queue;
2189 	int state;
2190 
2191 	dest  = &sp->sk_v6_daddr;
2192 	src   = &sp->sk_v6_rcv_saddr;
2193 	destp = ntohs(inet->inet_dport);
2194 	srcp  = ntohs(inet->inet_sport);
2195 
2196 	icsk_pending = smp_load_acquire(&icsk->icsk_pending);
2197 	if (icsk_pending == ICSK_TIME_RETRANS ||
2198 	    icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2199 	    icsk_pending == ICSK_TIME_LOSS_PROBE) {
2200 		timer_active	= 1;
2201 		timer_expires	= icsk_timeout(icsk);
2202 	} else if (icsk_pending == ICSK_TIME_PROBE0) {
2203 		timer_active	= 4;
2204 		timer_expires	= icsk_timeout(icsk);
2205 	} else if (timer_pending(&sp->sk_timer)) {
2206 		timer_active	= 2;
2207 		timer_expires	= sp->sk_timer.expires;
2208 	} else {
2209 		timer_active	= 0;
2210 		timer_expires = jiffies;
2211 	}
2212 
2213 	state = inet_sk_state_load(sp);
2214 	if (state == TCP_LISTEN)
2215 		rx_queue = READ_ONCE(sp->sk_ack_backlog);
2216 	else
2217 		/* Because we don't lock the socket,
2218 		 * we might find a transient negative value.
2219 		 */
2220 		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2221 				      READ_ONCE(tp->copied_seq), 0);
2222 
2223 	seq_printf(seq,
2224 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2225 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2226 		   i,
2227 		   src->s6_addr32[0], src->s6_addr32[1],
2228 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2229 		   dest->s6_addr32[0], dest->s6_addr32[1],
2230 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2231 		   state,
2232 		   READ_ONCE(tp->write_seq) - tp->snd_una,
2233 		   rx_queue,
2234 		   timer_active,
2235 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
2236 		   icsk->icsk_retransmits,
2237 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2238 		   icsk->icsk_probes_out,
2239 		   sock_i_ino(sp),
2240 		   refcount_read(&sp->sk_refcnt), sp,
2241 		   jiffies_to_clock_t(icsk->icsk_rto),
2242 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2243 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2244 		   tcp_snd_cwnd(tp),
2245 		   state == TCP_LISTEN ?
2246 			fastopenq->max_qlen :
2247 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2248 		   );
2249 }
2250 
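/* One /proc/net/tcp6 line for a TIME_WAIT socket.  Only the addresses, the
 * substate and the remaining timer are meaningful; other fields are
 * reported as zero.
 */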
2251 static void get_timewait6_sock(struct seq_file *seq,
2252 			       struct inet_timewait_sock *tw, int i)
2253 {
2254 	long delta = tw->tw_timer.expires - jiffies;
2255 	const struct in6_addr *dest, *src;
2256 	__u16 destp, srcp;
2257 
2258 	dest = &tw->tw_v6_daddr;
2259 	src  = &tw->tw_v6_rcv_saddr;
2260 	destp = ntohs(tw->tw_dport);
2261 	srcp  = ntohs(tw->tw_sport);
2262 
2263 	seq_printf(seq,
2264 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2265 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2266 		   i,
2267 		   src->s6_addr32[0], src->s6_addr32[1],
2268 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2269 		   dest->s6_addr32[0], dest->s6_addr32[1],
2270 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2271 		   READ_ONCE(tw->tw_substate), 0, 0,
2272 		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2273 		   refcount_read(&tw->tw_refcnt), tw);
2274 }
2275 
2276 static int tcp6_seq_show(struct seq_file *seq, void *v)
2277 {
2278 	struct tcp_iter_state *st;
2279 	struct sock *sk = v;
2280 
2281 	if (v == SEQ_START_TOKEN) {
2282 		seq_puts(seq,
2283 			 "  sl  "
2284 			 "local_address                         "
2285 			 "remote_address                        "
2286 			 "st tx_queue rx_queue tr tm->when retrnsmt"
2287 			 "   uid  timeout inode\n");
2288 		goto out;
2289 	}
2290 	st = seq->private;
2291 
2292 	if (sk->sk_state == TCP_TIME_WAIT)
2293 		get_timewait6_sock(seq, v, st->num);
2294 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2295 		get_openreq6(seq, v, st->num);
2296 	else
2297 		get_tcp6_sock(seq, v, st->num);
2298 out:
2299 	return 0;
2300 }
2301 
2302 static const struct seq_operations tcp6_seq_ops = {
2303 	.show		= tcp6_seq_show,
2304 	.start		= tcp_seq_start,
2305 	.next		= tcp_seq_next,
2306 	.stop		= tcp_seq_stop,
2307 };
2308 
2309 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2310 	.family		= AF_INET6,
2311 };
2312 
2313 int __net_init tcp6_proc_init(struct net *net)
2314 {
2315 	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2316 			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2317 		return -ENOMEM;
2318 	return 0;
2319 }
2320 
2321 void tcp6_proc_exit(struct net *net)
2322 {
2323 	remove_proc_entry("tcp6", net->proc_net);
2324 }
2325 #endif
2326 
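/* Protocol operations for AF_INET6 TCP sockets.  Most handlers are shared
 * with IPv4 because the TCP state machine itself is address-family
 * independent; only the pieces that touch IP headers, routing and socket
 * hashing differ.
 */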
2327 struct proto tcpv6_prot = {
2328 	.name			= "TCPv6",
2329 	.owner			= THIS_MODULE,
2330 	.close			= tcp_close,
2331 	.pre_connect		= tcp_v6_pre_connect,
2332 	.connect		= tcp_v6_connect,
2333 	.disconnect		= tcp_disconnect,
2334 	.accept			= inet_csk_accept,
2335 	.ioctl			= tcp_ioctl,
2336 	.init			= tcp_v6_init_sock,
2337 	.destroy		= tcp_v4_destroy_sock,
2338 	.shutdown		= tcp_shutdown,
2339 	.setsockopt		= tcp_setsockopt,
2340 	.getsockopt		= tcp_getsockopt,
2341 	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
2342 	.keepalive		= tcp_set_keepalive,
2343 	.recvmsg		= tcp_recvmsg,
2344 	.sendmsg		= tcp_sendmsg,
2345 	.splice_eof		= tcp_splice_eof,
2346 	.backlog_rcv		= tcp_v6_do_rcv,
2347 	.release_cb		= tcp_release_cb,
2348 	.hash			= inet6_hash,
2349 	.unhash			= inet_unhash,
2350 	.get_port		= inet_csk_get_port,
2351 	.put_port		= inet_put_port,
2352 #ifdef CONFIG_BPF_SYSCALL
2353 	.psock_update_sk_prot	= tcp_bpf_update_proto,
2354 #endif
2355 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2356 	.leave_memory_pressure	= tcp_leave_memory_pressure,
2357 	.stream_memory_free	= tcp_stream_memory_free,
2358 	.sockets_allocated	= &tcp_sockets_allocated,
2359 
2360 	.memory_allocated	= &tcp_memory_allocated,
2361 	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,
2362 
2363 	.memory_pressure	= &tcp_memory_pressure,
2364 	.orphan_count		= &tcp_orphan_count,
2365 	.sysctl_mem		= sysctl_tcp_mem,
2366 	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
2367 	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
2368 	.max_header		= MAX_TCP_HEADER,
2369 	.obj_size		= sizeof(struct tcp6_sock),
2370 	.ipv6_pinfo_offset	= offsetof(struct tcp6_sock, inet6),
2371 	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2372 	.twsk_prot		= &tcp6_timewait_sock_ops,
2373 	.rsk_prot		= &tcp6_request_sock_ops,
2374 	.h.hashinfo		= NULL,
2375 	.no_autobind		= true,
2376 	.diag_destroy		= tcp_abort,
2377 };
2378 EXPORT_SYMBOL_GPL(tcpv6_prot);
2379 
2380 
2381 static struct inet_protosw tcpv6_protosw = {
2382 	.type		=	SOCK_STREAM,
2383 	.protocol	=	IPPROTO_TCP,
2384 	.prot		=	&tcpv6_prot,
2385 	.ops		=	&inet6_stream_ops,
2386 	.flags		=	INET_PROTOSW_PERMANENT |
2387 				INET_PROTOSW_ICSK,
2388 };
2389 
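/* Per-network-namespace setup: create the kernel control socket
 * (net->ipv6.tcp_sk) used to transmit RST and ACK packets on behalf of
 * connections that have no full socket, e.g. resets for unknown connections
 * and TIME_WAIT ACKs.
 */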
2390 static int __net_init tcpv6_net_init(struct net *net)
2391 {
2392 	int res;
2393 
2394 	res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2395 				   SOCK_RAW, IPPROTO_TCP, net);
2396 	if (!res)
2397 		net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC;
2398 
2399 	return res;
2400 }
2401 
2402 static void __net_exit tcpv6_net_exit(struct net *net)
2403 {
2404 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2405 }
2406 
2407 static struct pernet_operations tcpv6_net_ops = {
2408 	.init	    = tcpv6_net_init,
2409 	.exit	    = tcpv6_net_exit,
2410 };
2411 
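/* Register the IPPROTO_TCP handler, the SOCK_STREAM protosw, the per-netns
 * operations and MPTCP-over-IPv6 support.  Failures unwind in reverse order
 * of registration.
 */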
2412 int __init tcpv6_init(void)
2413 {
2414 	int ret;
2415 
2416 	net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
2417 		.handler     = tcp_v6_rcv,
2418 		.err_handler = tcp_v6_err,
2419 		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
2420 	};
2421 	ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2422 	if (ret)
2423 		goto out;
2424 
2425 	/* register inet6 protocol */
2426 	ret = inet6_register_protosw(&tcpv6_protosw);
2427 	if (ret)
2428 		goto out_tcpv6_protocol;
2429 
2430 	ret = register_pernet_subsys(&tcpv6_net_ops);
2431 	if (ret)
2432 		goto out_tcpv6_protosw;
2433 
2434 	ret = mptcpv6_init();
2435 	if (ret)
2436 		goto out_tcpv6_pernet_subsys;
2437 
2438 out:
2439 	return ret;
2440 
2441 out_tcpv6_pernet_subsys:
2442 	unregister_pernet_subsys(&tcpv6_net_ops);
2443 out_tcpv6_protosw:
2444 	inet6_unregister_protosw(&tcpv6_protosw);
2445 out_tcpv6_protocol:
2446 	inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2447 	goto out;
2448 }
2449 
2450 void tcpv6_exit(void)
2451 {
2452 	unregister_pernet_subsys(&tcpv6_net_ops);
2453 	inet6_unregister_protosw(&tcpv6_protosw);
2454 	inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2455 }
2456