1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * TCP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on:
10 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 */
21
22 #include <linux/bottom_half.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/jiffies.h>
30 #include <linux/in.h>
31 #include <linux/in6.h>
32 #include <linux/netdevice.h>
33 #include <linux/init.h>
34 #include <linux/jhash.h>
35 #include <linux/ipsec.h>
36 #include <linux/times.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39 #include <linux/ipv6.h>
40 #include <linux/icmpv6.h>
41 #include <linux/random.h>
42 #include <linux/indirect_call_wrapper.h>
43
44 #include <net/tcp.h>
45 #include <net/ndisc.h>
46 #include <net/inet6_hashtables.h>
47 #include <net/inet6_connection_sock.h>
48 #include <net/ipv6.h>
49 #include <net/transp_v6.h>
50 #include <net/addrconf.h>
51 #include <net/ip6_route.h>
52 #include <net/ip6_checksum.h>
53 #include <net/inet_ecn.h>
54 #include <net/protocol.h>
55 #include <net/xfrm.h>
56 #include <net/snmp.h>
57 #include <net/dsfield.h>
58 #include <net/timewait_sock.h>
59 #include <net/inet_common.h>
60 #include <net/secure_seq.h>
61 #include <net/hotdata.h>
62 #include <net/busy_poll.h>
63 #include <net/rstreason.h>
64
65 #include <linux/proc_fs.h>
66 #include <linux/seq_file.h>
67
68 #include <crypto/hash.h>
69 #include <linux/scatterlist.h>
70
71 #include <trace/events/tcp.h>
72
73 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
74 enum sk_rst_reason reason);
75 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
77
78 INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79
80 static const struct inet_connection_sock_af_ops ipv6_mapped;
81 const struct inet_connection_sock_af_ops ipv6_specific;
82 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
84 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 #endif
86
87 /* Helper returning the inet6 address from a given tcp socket.
88 * It can be used in TCP stack instead of inet6_sk(sk).
89 * This avoids a dereference and allow compiler optimizations.
90 * It is a specialized version of inet6_sk_generic().
91 */
92 #define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \
93 struct tcp6_sock, tcp)->inet6)
94
/* Cache the inbound route on the socket for the established-state fast path.
 * The dst is only cached if a reference can be taken (dst_hold_safe() fails
 * when the entry is already being torn down).
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		/* Publish the dst before readers can see it (RCU). */
		rcu_assign_pointer(sk->sk_rx_dst, dst);
		/* Record ingress ifindex and a route-validity cookie so later
		 * packets can cheaply check the cached dst is still usable.
		 */
		sk->sk_rx_dst_ifindex = skb->skb_iif;
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
	}
}
105
tcp_v6_init_seq(const struct sk_buff * skb)106 static u32 tcp_v6_init_seq(const struct sk_buff *skb)
107 {
108 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112 }
113
tcp_v6_init_ts_off(const struct net * net,const struct sk_buff * skb)114 static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
115 {
116 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
117 ipv6_hdr(skb)->saddr.s6_addr32);
118 }
119
/* Hook run before tcp_v6_connect(): gives BPF cgroup programs a chance to
 * inspect/rewrite the destination address.  Returns 0 or a negative errno.
 */
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len);
}
134
/* Active (client-side) open of a TCP connection over IPv6.
 *
 * Validates the destination, handles flow labels, link-local scoping and the
 * BSD "connect to :: means loopback" convention, then routes, binds a source
 * address, and sends the SYN via tcp_connect().  A v4-mapped destination is
 * handed off to tcp_v4_connect() after swapping in the mapped af_ops.
 *
 * Returns 0 on success (or on a deferred TCP fastopen connect) and a
 * negative errno otherwise; on failure the socket is moved back to
 * TCP_CLOSE with dport/route caps cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct inet_timewait_death_row *tcp_death_row;
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6_txoptions *opt;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* If IPV6_FLOWINFO_SEND is enabled, honour the flow label supplied by
	 * user space, but only after validating it exists (for a non-zero
	 * label) via the socket's flow-label list.
	 */
	if (inet6_test_bit(SNDFLOW, sk)) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: stale per-peer timestamp state
	 * and write_seq must not leak into the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
		WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* v4 connect failed: roll the af_ops swap back so the
			 * socket behaves as a plain IPv6 socket again.
			 */
			icsk->icsk_ext_hdr_len = exthdrlen;
			/* Paired with READ_ONCE() in tcp_(get|set)sockopt() */
			WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific);
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow description used for routing the connection. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	tp->tcp_usec_ts = dst_tcp_usec_ts(dst);
	tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (!saddr) {
		/* No bound source address yet: adopt the one the route lookup
		 * selected and update the bhash2 table accordingly.
		 */
		saddr = &fl6.saddr;

		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
		if (err)
			goto failure;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	/* Extension headers consume payload space; account for them in MSS. */
	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP fastopen may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	inet_bhash2_reset_saddr(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
347
/* React to a Packet Too Big notification: shrink the path MTU and
 * retransmit with the smaller MSS.  mtu_info was stored by tcp_v6_err();
 * this may run deferred from release_sock() if the socket was owned.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	/* No path MTU to maintain for listeners or closed sockets. */
	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
373
/* ICMPv6 error handler for TCP.  Called from the ICMPv6 layer with the
 * offending packet's headers; looks up the affected socket and applies the
 * error according to the socket state (request socket, timewait, PMTU
 * update, redirect, or hard/soft error on an established/connecting sock).
 * Returns 0 if the error was consumed, -ENOENT if no socket matched.
 */
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		/* To increase the counter of ignored icmps for TCP-AO */
		tcp_ao_ignore_icmp(sk, AF_INET6, type, code);
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) {
		sock_put(sk);
		return 0;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG is still processed when user owns the socket (it is
	 * deferred, see below); other errors are dropped and counted.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			goto out;
		}
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* Ignore ICMPs quoting a sequence outside the current send window. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		/* Never shrink below the IPv6 minimum MTU. */
		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

		/* Apply immediately, or defer to release_sock() if the
		 * socket is currently owned by user context.
		 */
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk))
			tcp_done_with_error(sk, err);
		else
			WRITE_ONCE(sk->sk_err_soft, err);
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	/* Hard error only if the app asked for it (IPV6_RECVERR) and the
	 * socket is not owned; otherwise record a soft error.
	 */
	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
		WRITE_ONCE(sk->sk_err, err);
		sk_error_report(sk);
	} else {
		WRITE_ONCE(sk->sk_err_soft, err);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
520
521
/* Build and transmit a SYN-ACK for a pending connection request.
 * Routes the reply if no dst was supplied, computes the traffic class
 * (optionally reflecting the SYN's TOS), and sends via ip6_xmit() under RCU
 * (np->opt is RCU-protected).  Returns a net_xmit_* code or -ENOMEM.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* With IPV6_FLOWLABEL_MGR reflection, echo the flow label of
		 * the saved SYN back to the peer.
		 */
		if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* sysctl_tcp_reflect_tos: keep the SYN's DSCP bits but use
		 * our own ECN bits; otherwise use the socket's tclass.
		 */
		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
				np->tclass;

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark),
			       opt, tclass, READ_ONCE(sk->sk_priority));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
574
575
tcp_v6_reqsk_destructor(struct request_sock * req)576 static void tcp_v6_reqsk_destructor(struct request_sock *req)
577 {
578 kfree(inet_rsk(req)->ipv6_opt);
579 consume_skb(inet_rsk(req)->pktopts);
580 }
581
582 #ifdef CONFIG_TCP_MD5SIG
tcp_v6_md5_do_lookup(const struct sock * sk,const struct in6_addr * addr,int l3index)583 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
584 const struct in6_addr *addr,
585 int l3index)
586 {
587 return tcp_md5_do_lookup(sk, l3index,
588 (union tcp_md5_addr *)addr, AF_INET6);
589 }
590
tcp_v6_md5_lookup(const struct sock * sk,const struct sock * addr_sk)591 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
592 const struct sock *addr_sk)
593 {
594 int l3index;
595
596 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
597 addr_sk->sk_bound_dev_if);
598 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
599 l3index);
600 }
601
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler for IPv6 sockets.
 * Copies the user's struct tcp_md5sig, validates prefix length and optional
 * ifindex binding, then adds or (for a zero key length) deletes the key.
 * v4-mapped addresses are stored as AF_INET keys.  Returns 0 or -errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	union tcp_ao_addr *addr;
	int l3index = 0;
	u8 prefixlen;
	bool l3flag;
	u8 flags;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
	l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;

	/* Prefix matching: explicit prefix only via TCP_MD5SIG_EXT; a
	 * v4-mapped address may not exceed the 32-bit IPv4 prefix space.
	 */
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	/* Zero key length means "delete the key for this address". */
	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index, flags);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index, flags);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3];

		/* Don't allow keys for peers that have a matching TCP-AO key.
		 * See the comment in tcp_ao_add_cmd()
		 */
		if (tcp_ao_required(sk, addr, AF_INET,
				    l3flag ? l3index : -1, false))
			return -EKEYREJECTED;
		return tcp_md5_do_add(sk, addr,
				      AF_INET, prefixlen, l3index, flags,
				      cmd.tcpm_key, cmd.tcpm_keylen);
	}

	addr = (union tcp_md5_addr *)&sin6->sin6_addr;

	/* Don't allow keys for peers that have a matching TCP-AO key.
	 * See the comment in tcp_ao_add_cmd()
	 */
	if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false))
		return -EKEYREJECTED;

	return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags,
			      cmd.tcpm_key, cmd.tcpm_keylen);
}
689
/* Feed the TCP-MD5 pseudo-header plus a checksum-zeroed copy of the TCP
 * header into the running hash.  nbytes is the TCP segment length recorded
 * in the pseudo-header.  Returns the crypto_ahash_update() result.
 */
static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* Copy the TCP header after the pseudo-header with check cleared:
	 * the signature is computed as if the checksum field were zero.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->req);
}
715
/* Compute the TCP-MD5 signature over pseudo-header + TCP header (including
 * options, th->doff << 2 bytes) + key, writing 16 bytes to md5_hash.
 * On any failure the output is zeroed and 1 is returned; 0 on success.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_sigpool hp;

	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
		goto clear_hash_nostart;

	if (crypto_ahash_init(hp.req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(&hp, key))
		goto clear_hash;
	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
	if (crypto_ahash_final(hp.req))
		goto clear_hash;

	tcp_sigpool_end(&hp);
	return 0;

clear_hash:
	tcp_sigpool_end(&hp);
clear_hash_nostart:
	/* Zero the digest so callers never consume a partial hash. */
	memset(md5_hash, 0, 16);
	return 1;
}
744
/* Compute the TCP-MD5 signature for a whole segment: pseudo-header, TCP
 * header, payload, then key.  Addresses come from the socket when one is
 * given (established/request socket), otherwise from the packet itself.
 * Writes 16 bytes to md5_hash; returns 0 on success, 1 (zeroed hash) on
 * failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct in6_addr *saddr, *daddr;
	struct tcp_sigpool hp;

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
		goto clear_hash_nostart;

	if (crypto_ahash_init(hp.req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	/* Hash the payload (everything after the TCP header + options). */
	if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(&hp, key))
		goto clear_hash;
	ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
	if (crypto_ahash_final(hp.req))
		goto clear_hash;

	tcp_sigpool_end(&hp);
	return 0;

clear_hash:
	tcp_sigpool_end(&hp);
clear_hash_nostart:
	/* Zero the digest so callers never consume a partial hash. */
	memset(md5_hash, 0, 16);
	return 1;
}
788 #endif
789
/* Fill the IPv6-specific fields of a freshly minted request sock from the
 * incoming SYN: addresses, inbound interface for link-local peers, and a
 * reference on the SYN skb when the listener wants rx options reflected.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb,
			    u32 tw_isn)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN skb around (extra ref) if any rx option the listener
	 * subscribed to must be delivered to the child socket later; not
	 * needed when the request stems from a timewait recycle (tw_isn).
	 */
	if (!tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
816
/* route_req callback: initialize the request sock, let the LSM veto the
 * connection, then compute the route for the SYN-ACK.  Returns the dst or
 * NULL if the security hook rejected the request.
 */
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req,
					  u32 tw_isn)
{
	tcp_v6_init_req(req, sk, skb, tw_isn);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
830
/* Generic request-sock operations for IPv6 TCP (allocation size, SYN-ACK
 * (re)transmission, ACK/RST emission, and cleanup).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
840
/* AF-specific request-sock helpers for IPv6: MSS clamping, MD5/AO key
 * handling, syncookies, routing and ISN/timestamp-offset generation.
 */
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_TCP_AO
	.ao_lookup	=	tcp_v6_ao_lookup_rsk,
	.ao_calc_key	=	tcp_v6_ao_calc_key_rsk,
	.ao_synack_hash	=	tcp_v6_ao_synack_hash,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
861
/* Build and send a bare control segment (RST or ACK, no payload) in reply
 * to @skb.  Used for resets and timewait/challenge ACKs.  @sk may be NULL
 * (no matching socket), a full socket, or a timewait socket; the reply is
 * transmitted via the per-netns control socket.  Options (timestamps,
 * MD5, AO, MPTCP reset) are appended in that order after the header.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, int rst, u8 tclass, __be32 label,
				 u32 priority, u32 txhash, struct tcp_key *key)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 mrst = 0, *topt;
	struct dst_entry *dst;
	__u32 mark = 0;

	/* Account option space before allocating: timestamps, then either
	 * an MD5 or an AO signature option.
	 */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
	if (tcp_key_is_md5(key))
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
	if (tcp_key_is_ao(key))
		tot_len += tcp_ao_len_aligned(key->ao_key);

#ifdef CONFIG_MPTCP
	if (rst && !tcp_key_is_md5(key)) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_TCP_HEADER);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Lay out option words directly after the fixed header. */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

	if (mrst)
		*topt++ = mrst;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_key_is_md5(key)) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif
#ifdef CONFIG_TCP_AO
	if (tcp_key_is_ao(key)) {
		*topt++ = htonl((TCPOPT_AO << 24) |
				(tcp_ao_len(key->ao_key) << 16) |
				(key->ao_key->sndid << 8) |
				(key->rcv_next));

		tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
				key->traffic_key,
				(union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
				(union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
				t1, key->sne);
	}
#endif

	/* Reply flow: reverse the addresses of the packet we are answering. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local replies must leave via the ingress interface. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			mark = inet_twsk(sk)->tw_mark;
		else
			mark = READ_ONCE(sk->sk_mark);
		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
	}
	if (txhash) {
		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
	}
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	if (sk && sk->sk_state != TCP_TIME_WAIT)
		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
	else
		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply we built. */
	kfree_skb(buff);
}
1006
/* Send a RST segment in reply to @skb.
 *
 * @sk may be NULL (no socket matched the incoming segment); in that case
 * the reset is only sent for unicast destinations and the netns is taken
 * from the skb's dst. Never answers a RST with a RST. When the segment
 * carried an MD5 or TCP-AO option, the reply is signed with the matching
 * key (and silently suppressed when no valid key can be found).
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
			      enum sk_rst_reason reason)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	const __u8 *md5_hash_location = NULL;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
	bool allocated_traffic_key = false;
#endif
	const struct tcp_ao_hdr *aoh;
	struct tcp_key key = {};
	u32 seq = 0, ack_seq = 0;
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	u32 txhash = 0;
	int oif = 0;
#ifdef CONFIG_TCP_MD5SIG
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	/* Invalid TCP option size or twice included auth */
	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
		return;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
	rcu_read_lock();
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
	} else if (md5_hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
					    NULL, 0, &ipv6h->saddr, th->source,
					    &ipv6h->daddr, ntohs(th->source),
					    dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key.md5_key)
			goto out;
		key.type = TCP_KEY_MD5;

		/* Verify the incoming segment's MD5 signature before
		 * answering it; mismatch -> no RST at all.
		 */
		genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
		if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* Reset sequence selection: echo the peer's ACK number when present,
	 * otherwise ACK exactly what the peer sent (seq + flags + payload).
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

#ifdef CONFIG_TCP_AO
	if (aoh) {
		int l3index;

		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
					 &key.ao_key, &key.traffic_key,
					 &allocated_traffic_key,
					 &key.rcv_next, &key.sne))
			goto out;
		key.type = TCP_KEY_AO;
	}
#endif

	/* Pick oif/flow label/priority/txhash from the socket when we have
	 * one; otherwise optionally reflect the incoming flow label.
	 */
	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			if (inet6_test_bit(REPFLOW, sk))
				label = ip6_flowlabel(ipv6h);
			priority = READ_ONCE(sk->sk_priority);
			txhash = sk->sk_txhash;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
			txhash = inet_twsk(sk)->tw_txhash;
		}
	} else {
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	trace_tcp_send_reset(sk, skb, reason);

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
			     ipv6_get_dsfield(ipv6h), label, priority, txhash,
			     &key);

#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
out:
	if (allocated_traffic_key)
		kfree(key.traffic_key);
	rcu_read_unlock();
#endif
}
1143
/* Send a bare ACK built from @skb: thin wrapper around
 * tcp_v6_send_response() with the rst argument forced to 0.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_key *key, u8 tclass,
			    __be32 label, u32 priority, u32 txhash)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
			     tclass, label, priority, txhash, key);
}
1152
/* ACK a segment received for a TIME_WAIT socket, signing with TCP-AO or
 * MD5 when the timewait socket carries a key. Consumes the tw reference.
 *
 * NOTE: the if/else chain below is deliberately spliced across the
 * CONFIG_TCP_AO / CONFIG_TCP_MD5SIG preprocessor branches, so braces do
 * not balance within a single #ifdef arm — keep the structure as-is.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
	struct tcp_key key = {};
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;

	if (static_branch_unlikely(&tcp_ao_needed.key)) {

		/* FIXME: the segment to-be-acked is not verified yet */
		ao_info = rcu_dereference(tcptw->ao_info);
		if (ao_info) {
			const struct tcp_ao_hdr *aoh;

			/* Invalid TCP option size or twice included auth */
			if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
				goto out;
			if (aoh)
				key.ao_key = tcp_ao_established_key(ao_info,
								    aoh->rnext_keyid, -1);
		}
	}
	if (key.ao_key) {
		struct tcp_ao_key *rnext_key;

		key.traffic_key = snd_other_key(key.ao_key);
		/* rcv_next switches to our rcv_next */
		rnext_key = READ_ONCE(ao_info->rnext_key);
		key.rcv_next = rnext_key->rcvid;
		key.sne = READ_ONCE(ao_info->snd_sne);
		key.type = TCP_KEY_AO;
#else
	if (0) {
#endif
#ifdef CONFIG_TCP_MD5SIG
	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
		key.md5_key = tcp_twsk_md5_key(tcptw);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
#endif
	}

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt,
			READ_ONCE(tcptw->tw_rcv_nxt),
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_tw_tsval(tcptw),
			READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
			&key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel),
			tw->tw_priority, tw->tw_txhash);

#ifdef CONFIG_TCP_AO
out:
#endif
	inet_twsk_put(tw);
}
1209
/* ACK a segment on behalf of a request socket (SYN_RECV / Fast Open),
 * signing with TCP-AO or MD5 when configured. If a previously matching
 * TCP-AO key has disappeared, the ACK is suppressed so the handshake
 * times out instead.
 *
 * NOTE: like tcp_v6_timewait_ack(), the if/else chain is spliced across
 * the CONFIG_TCP_AO / CONFIG_TCP_MD5SIG branches — braces intentionally
 * do not balance within a single preprocessor arm.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	struct tcp_key key = {};

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key) &&
	    tcp_rsk_used_ao(req)) {
		const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
		const struct tcp_ao_hdr *aoh;
		int l3index;

		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		/* Invalid TCP option size or twice included auth */
		if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
			return;
		if (!aoh)
			return;
		key.ao_key = tcp_ao_do_lookup(sk, l3index,
					      (union tcp_ao_addr *)addr,
					      AF_INET6, aoh->rnext_keyid, -1);
		if (unlikely(!key.ao_key)) {
			/* Send ACK with any matching MKT for the peer */
			key.ao_key = tcp_ao_do_lookup(sk, l3index,
						      (union tcp_ao_addr *)addr,
						      AF_INET6, -1, -1);
			/* Matching key disappeared (user removed the key?)
			 * let the handshake timeout.
			 */
			if (!key.ao_key) {
				net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
						     addr,
						     ntohs(tcp_hdr(skb)->source),
						     &ipv6_hdr(skb)->daddr,
						     ntohs(tcp_hdr(skb)->dest));
				return;
			}
		}
		/* traffic_key is freed after tcp_v6_send_ack() below */
		key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
		if (!key.traffic_key)
			return;

		key.type = TCP_KEY_AO;
		key.rcv_next = aoh->keyid;
		tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
#else
	if (0) {
#endif
#ifdef CONFIG_TCP_MD5SIG
	} else if (static_branch_unlikely(&tcp_md5_needed.key)) {
		int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

		key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
						   l3index);
		if (key.md5_key)
			key.type = TCP_KEY_MD5;
#endif
	}

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
			tcp_rsk_tsval(tcp_rsk(req)),
			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
			&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
			READ_ONCE(sk->sk_priority),
			READ_ONCE(tcp_rsk(req)->txhash));
	if (tcp_key_is_ao(&key))
		kfree(key.traffic_key);
}
1284
1285
/* On a listener that may be under SYN flood, let a non-SYN segment be
 * validated as a syncookie ACK. Returns the socket to process the skb
 * with: either a freshly minted child or the listener itself (and the
 * listener unchanged when CONFIG_SYN_COOKIES is off or skb is a SYN).
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1296
1297 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1298 struct tcphdr *th, u32 *cookie)
1299 {
1300 u16 mss = 0;
1301 #ifdef CONFIG_SYN_COOKIES
1302 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1303 &tcp_request_sock_ipv6_ops, sk, th);
1304 if (mss) {
1305 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1306 tcp_synq_overflow(sk);
1307 }
1308 #endif
1309 return mss;
1310 }
1311
1312 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1313 {
1314 if (skb->protocol == htons(ETH_P_IP))
1315 return tcp_v4_conn_request(sk, skb);
1316
1317 if (!ipv6_unicast_destination(skb))
1318 goto drop;
1319
1320 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1321 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1322 return 0;
1323 }
1324
1325 return tcp_conn_request(&tcp6_request_sock_ops,
1326 &tcp_request_sock_ipv6_ops, sk, skb);
1327
1328 drop:
1329 tcp_listendrop(sk);
1330 return 0; /* don't send reset */
1331 }
1332
/* Undo tcp_v6_fill_cb(): move the IPv6 control block back to IP6CB(). */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1342
/* Create the child socket for a completed handshake.
 *
 * For an IPv4 packet on a dual-stack listener the child is created by
 * tcp_v4_syn_recv_sock() and then converted into a "v6 mapped" socket.
 * Returns the new socket, or NULL on failure (accept queue full, no
 * route, allocation failure), bumping the listener drop counter.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* Switch the child to the v4-mapped operation tables. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (inet6_test_bit(REPFLOW, sk))
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ip6_dst_store(newsk, dst, NULL, NULL);

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (inet6_test_bit(REPFLOW, sk))
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECT bits are set later in tcp_init_transfer().
	 */
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	if (!tcp_rsk_used_ao(req)) {
		/* Copy over the MD5 key from the original socket */
		key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
		if (key) {
			const union tcp_md5_addr *addr;

			addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr;
			if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) {
				inet_csk_prepare_forced_close(newsk);
				tcp_done(newsk);
				goto out;
			}
		}
	}
#endif
#ifdef CONFIG_TCP_AO
	/* Copy over tcp_ao_info if any */
	if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6))
		goto out; /* OOM */
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				tcp_v6_restore_cb(newnp->pktoptions);
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1573
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							   u32));
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
INDIRECT_CALLABLE_SCOPE
int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	enum skb_drop_reason reason;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone_and_charge_r(skb, sk);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst;

		dst = rcu_dereference_protected(sk->sk_rx_dst,
						lockdep_sock_is_held(sk));

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Drop the cached rx dst if the incoming interface
			 * changed or the route is no longer valid.
			 */
			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
					    dst, sk->sk_rx_dst_cookie) == NULL) {
				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
				dst_release(dst);
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (nsk != sk) {
			if (nsk) {
				reason = tcp_child_process(sk, nsk, skb);
				if (reason)
					goto reset;
			}
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	reason = tcp_rcv_state_process(sk, skb);
	if (reason)
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason));
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	sk_skb_reason_drop(sk, skb, reason);
	return 0;
csum_err:
	reason = SKB_DROP_REASON_TCP_CSUM;
	trace_tcp_bad_csum(skb);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb));
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			WRITE_ONCE(np->mcast_hops,
				   ipv6_hdr(opt_skb)->hop_limit);
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (inet6_test_bit(REPFLOW, sk))
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			/* Latch this skb as the most recent pktoptions;
			 * the previous one (if any) is freed below.
			 */
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	consume_skb(opt_skb);
	return 0;
}
1720
1721 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1722 const struct tcphdr *th)
1723 {
1724 /* This is tricky: we move IP6CB at its correct location into
1725 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1726 * _decode_session6() uses IP6CB().
1727 * barrier() makes sure compiler won't play aliasing games.
1728 */
1729 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1730 sizeof(struct inet6_skb_parm));
1731 barrier();
1732
1733 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1734 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1735 skb->len - th->doff*4);
1736 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1737 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1738 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1739 TCP_SKB_CB(skb)->sacked = 0;
1740 TCP_SKB_CB(skb)->has_rxtstamp =
1741 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1742 }
1743
/* Main IPv6 TCP receive entry point (protocol handler).
 * Validates the header and checksum, looks up the owning socket and
 * dispatches to the appropriate state handler (full socket, request
 * socket, or TIME_WAIT). Returns 0 except when tcp_v6_do_rcv() fails.
 */
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason;
	int sdif = inet6_sdif(skb);
	int dif = inet6_iif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk = NULL;
	bool refcounted;
	int ret;
	u32 isn;
	struct net *net = dev_net(skb->dev);

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
		goto bad_packet;
	}
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated; refetch header pointers */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
			drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		else
			drop_reason = tcp_inbound_hash(sk, req, skb,
						       &hdr->saddr, &hdr->daddr,
						       AF_INET6, dif, sdif);
		if (drop_reason) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
			if (!nsk) {
				inet_csk_reqsk_queue_drop_and_put(sk, req);
				goto lookup;
			}
			sk = nsk;
			/* reuseport_migrate_sock() has already held one sk_refcnt
			 * before returning.
			 */
		} else {
			sock_hold(sk);
		}
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		} else {
			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		nf_reset_ct(skb);
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else {
			drop_reason = tcp_child_process(sk, nsk, skb);
			if (drop_reason) {
				enum sk_rst_reason rst_reason;

				rst_reason = sk_rst_convert_drop_reason(drop_reason);
				tcp_v6_send_reset(nsk, skb, rst_reason);
				goto discard_and_relse;
			}
			sock_put(sk);
			return 0;
		}
	}

process:
	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			drop_reason = SKB_DROP_REASON_TCP_MINTTL;
			goto discard_and_relse;
		}
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto discard_and_relse;
	}

	drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
				       AF_INET6, dif, sdif);
	if (drop_reason)
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto discard_and_relse;
	}
	/* tcp_filter() may have trimmed/reallocated the skb */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb, &drop_reason))
			goto discard_and_relse;
	}
	bh_unlock_sock(sk);
put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	drop_reason = SKB_DROP_REASON_NO_SOCKET;
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		drop_reason = SKB_DROP_REASON_TCP_CSUM;
		trace_tcp_bad_csum(skb);
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason));
	}

discard_it:
	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
	case TCP_TW_SYN:
	{
		/* A new SYN hit the old TIME_WAIT pair: look for a listener
		 * that can take over, recycling the timewait socket.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			__this_cpu_write(tcp_tw_isn, isn);
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
2003
/* Early demux: before routing, try to match the skb to an established
 * socket so its cached rx dst can be attached (skipping a route lookup).
 */
void tcp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

			/* Only reuse the cached dst when it is still valid
			 * and the incoming interface matches.
			 */
			if (dst)
				dst = dst_check(dst, sk->sk_rx_dst_cookie);
			if (dst &&
			    sk->sk_rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
2042
/* Timewait socket sizing/destructor hooks for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_destructor = tcp_twsk_destructor,
};
2047
/* send_check callback: compute the TCP checksum using the socket's
 * IPv6 source/destination addresses for the pseudo-header.
 */
INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
}
2052
/* Address-family operations for native TCP over IPv6 sockets. */
const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
2067
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* MD5/TCP-AO signing operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
#endif
#ifdef CONFIG_TCP_AO
	.ao_lookup	=	tcp_v6_ao_lookup,
	.calc_ao_hash	=	tcp_v6_ao_hash_skb,
	.ao_parse	=	tcp_v6_parse_ao,
	.ao_calc_key_sk	=	tcp_v6_ao_calc_key_sk,
#endif
};
#endif
2083
/*
 *	TCP over IPv4 via INET6 API
 *	(address-family ops installed on "v6 mapped" children created by
 *	tcp_v6_syn_recv_sock(): IPv4 transmit paths, IPv6 sockopt paths)
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
2101
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* MD5/TCP-AO signing operations for v4-mapped sockets (IPv4 hashing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
#endif
#ifdef CONFIG_TCP_AO
	.ao_lookup	=	tcp_v6_ao_lookup,
	.calc_ao_hash	=	tcp_v4_ao_hash_skb,
	.ao_parse	=	tcp_v6_parse_ao,
	.ao_calc_key_sk	=	tcp_v4_ao_calc_key_sk,
#endif
};
#endif
2117
2118 /* NOTE: A lot of things set to zero explicitly by call to
2119 * sk_alloc() so need not be done here.
2120 */
2121 static int tcp_v6_init_sock(struct sock *sk)
2122 {
2123 struct inet_connection_sock *icsk = inet_csk(sk);
2124
2125 tcp_init_sock(sk);
2126
2127 icsk->icsk_af_ops = &ipv6_specific;
2128
2129 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2130 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
2131 #endif
2132
2133 return 0;
2134 }
2135
2136 #ifdef CONFIG_PROC_FS
2137 /* Proc filesystem TCPv6 sock list dumping. */
2138 static void get_openreq6(struct seq_file *seq,
2139 const struct request_sock *req, int i)
2140 {
2141 long ttd = req->rsk_timer.expires - jiffies;
2142 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
2143 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
2144
2145 if (ttd < 0)
2146 ttd = 0;
2147
2148 seq_printf(seq,
2149 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2150 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
2151 i,
2152 src->s6_addr32[0], src->s6_addr32[1],
2153 src->s6_addr32[2], src->s6_addr32[3],
2154 inet_rsk(req)->ir_num,
2155 dest->s6_addr32[0], dest->s6_addr32[1],
2156 dest->s6_addr32[2], dest->s6_addr32[3],
2157 ntohs(inet_rsk(req)->ir_rmt_port),
2158 TCP_SYN_RECV,
2159 0, 0, /* could print option size, but that is af dependent. */
2160 1, /* timers active (only the expire timer) */
2161 jiffies_to_clock_t(ttd),
2162 req->num_timeout,
2163 from_kuid_munged(seq_user_ns(seq),
2164 sock_i_uid(req->rsk_listener)),
2165 0, /* non standard timer */
2166 0, /* open_requests have no inode */
2167 0, req);
2168 }
2169
2170 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2171 {
2172 const struct in6_addr *dest, *src;
2173 __u16 destp, srcp;
2174 int timer_active;
2175 unsigned long timer_expires;
2176 const struct inet_sock *inet = inet_sk(sp);
2177 const struct tcp_sock *tp = tcp_sk(sp);
2178 const struct inet_connection_sock *icsk = inet_csk(sp);
2179 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2180 int rx_queue;
2181 int state;
2182
2183 dest = &sp->sk_v6_daddr;
2184 src = &sp->sk_v6_rcv_saddr;
2185 destp = ntohs(inet->inet_dport);
2186 srcp = ntohs(inet->inet_sport);
2187
2188 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2189 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2190 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2191 timer_active = 1;
2192 timer_expires = icsk->icsk_timeout;
2193 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2194 timer_active = 4;
2195 timer_expires = icsk->icsk_timeout;
2196 } else if (timer_pending(&sp->sk_timer)) {
2197 timer_active = 2;
2198 timer_expires = sp->sk_timer.expires;
2199 } else {
2200 timer_active = 0;
2201 timer_expires = jiffies;
2202 }
2203
2204 state = inet_sk_state_load(sp);
2205 if (state == TCP_LISTEN)
2206 rx_queue = READ_ONCE(sp->sk_ack_backlog);
2207 else
2208 /* Because we don't lock the socket,
2209 * we might find a transient negative value.
2210 */
2211 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2212 READ_ONCE(tp->copied_seq), 0);
2213
2214 seq_printf(seq,
2215 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2216 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
2217 i,
2218 src->s6_addr32[0], src->s6_addr32[1],
2219 src->s6_addr32[2], src->s6_addr32[3], srcp,
2220 dest->s6_addr32[0], dest->s6_addr32[1],
2221 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2222 state,
2223 READ_ONCE(tp->write_seq) - tp->snd_una,
2224 rx_queue,
2225 timer_active,
2226 jiffies_delta_to_clock_t(timer_expires - jiffies),
2227 icsk->icsk_retransmits,
2228 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2229 icsk->icsk_probes_out,
2230 sock_i_ino(sp),
2231 refcount_read(&sp->sk_refcnt), sp,
2232 jiffies_to_clock_t(icsk->icsk_rto),
2233 jiffies_to_clock_t(icsk->icsk_ack.ato),
2234 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
2235 tcp_snd_cwnd(tp),
2236 state == TCP_LISTEN ?
2237 fastopenq->max_qlen :
2238 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
2239 );
2240 }
2241
2242 static void get_timewait6_sock(struct seq_file *seq,
2243 struct inet_timewait_sock *tw, int i)
2244 {
2245 long delta = tw->tw_timer.expires - jiffies;
2246 const struct in6_addr *dest, *src;
2247 __u16 destp, srcp;
2248
2249 dest = &tw->tw_v6_daddr;
2250 src = &tw->tw_v6_rcv_saddr;
2251 destp = ntohs(tw->tw_dport);
2252 srcp = ntohs(tw->tw_sport);
2253
2254 seq_printf(seq,
2255 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2256 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2257 i,
2258 src->s6_addr32[0], src->s6_addr32[1],
2259 src->s6_addr32[2], src->s6_addr32[3], srcp,
2260 dest->s6_addr32[0], dest->s6_addr32[1],
2261 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2262 READ_ONCE(tw->tw_substate), 0, 0,
2263 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2264 refcount_read(&tw->tw_refcnt), tw);
2265 }
2266
2267 static int tcp6_seq_show(struct seq_file *seq, void *v)
2268 {
2269 struct tcp_iter_state *st;
2270 struct sock *sk = v;
2271
2272 if (v == SEQ_START_TOKEN) {
2273 seq_puts(seq,
2274 " sl "
2275 "local_address "
2276 "remote_address "
2277 "st tx_queue rx_queue tr tm->when retrnsmt"
2278 " uid timeout inode\n");
2279 goto out;
2280 }
2281 st = seq->private;
2282
2283 if (sk->sk_state == TCP_TIME_WAIT)
2284 get_timewait6_sock(seq, v, st->num);
2285 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2286 get_openreq6(seq, v, st->num);
2287 else
2288 get_tcp6_sock(seq, v, st->num);
2289 out:
2290 return 0;
2291 }
2292
/* Iteration callbacks for /proc/net/tcp6; the start/next/stop walkers
 * are shared with IPv4 (tcp_seq_*), only ->show is v6-specific.
 */
static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};
2299
/* Restricts the shared tcp_seq_* walkers to AF_INET6 sockets. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};
2303
2304 int __net_init tcp6_proc_init(struct net *net)
2305 {
2306 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2307 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
2308 return -ENOMEM;
2309 return 0;
2310 }
2311
/* Remove the per-netns /proc/net/tcp6 entry created by tcp6_proc_init(). */
void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
2316 #endif
2317
/* Transport-protocol descriptor for AF_INET6/SOCK_STREAM sockets.
 * Most callbacks are the address-family independent tcp_* helpers;
 * only connect/rcv paths use the tcp_v6_* variants.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	/* The v4 destroy routine is reused here; presumably the teardown
	 * path is address-family independent — NOTE(review): matches the
	 * IPv4 naming, not a typo.
	 */
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.splice_eof		= tcp_splice_eof,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.put_port		= inet_put_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	/* Accounting state is shared with IPv4 TCP. */
	.sockets_allocated	= &tcp_sockets_allocated,

	.memory_allocated	= &tcp_memory_allocated,
	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,

	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct tcp6_sock, inet6),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	/* NOTE(review): hashinfo is left NULL here; presumably assigned
	 * elsewhere during init — verify against the registration path.
	 */
	.h.hashinfo		= NULL,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
2370
2371
/* socket() switch entry mapping SOCK_STREAM/IPPROTO_TCP on AF_INET6
 * to tcpv6_prot; PERMANENT prevents runtime unregistration, ICSK marks
 * it as a connection-oriented (inet_connection_sock) protocol.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2380
2381 static int __net_init tcpv6_net_init(struct net *net)
2382 {
2383 int res;
2384
2385 res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2386 SOCK_RAW, IPPROTO_TCP, net);
2387 if (!res)
2388 net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC;
2389
2390 return res;
2391 }
2392
/* Per-netns teardown: release the control socket created in tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2397
/* Registers the per-network-namespace init/exit hooks above. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
};
2402
2403 int __init tcpv6_init(void)
2404 {
2405 int ret;
2406
2407 net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
2408 .handler = tcp_v6_rcv,
2409 .err_handler = tcp_v6_err,
2410 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
2411 };
2412 ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2413 if (ret)
2414 goto out;
2415
2416 /* register inet6 protocol */
2417 ret = inet6_register_protosw(&tcpv6_protosw);
2418 if (ret)
2419 goto out_tcpv6_protocol;
2420
2421 ret = register_pernet_subsys(&tcpv6_net_ops);
2422 if (ret)
2423 goto out_tcpv6_protosw;
2424
2425 ret = mptcpv6_init();
2426 if (ret)
2427 goto out_tcpv6_pernet_subsys;
2428
2429 out:
2430 return ret;
2431
2432 out_tcpv6_pernet_subsys:
2433 unregister_pernet_subsys(&tcpv6_net_ops);
2434 out_tcpv6_protosw:
2435 inet6_unregister_protosw(&tcpv6_protosw);
2436 out_tcpv6_protocol:
2437 inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
2438 goto out;
2439 }
2440
/* Unregister everything tcpv6_init() set up, in reverse order. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
}
2447