/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr, (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea.  We save the last timestamp seen from
		 * the destination in the peer table, when entering
		 * TIME-WAIT state, and initialize rx_opt.ts_recent
		 * from it when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
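
/* Editorial note, not from the original source: the connect() path above is
 * reached from userspace roughly as follows (names hypothetical).  A
 * link-local destination must carry a scope id, matching the
 * IPV6_ADDR_LINKLOCAL checks above:
 *
 *	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6 };
 *
 *	inet_pton(AF_INET6, "fe80::1", &sin6.sin6_addr);
 *	sin6.sin6_port = htons(80);
 *	sin6.sin6_scope_id = if_nametoindex("eth0");
 *	connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
 *
 * With "::ffff:192.0.2.1" instead, addr_type equals IPV6_ADDR_MAPPED and the
 * socket is transparently handed to tcp_v4_connect() as shown above.
 */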

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case.  Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
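
/* A concrete example of the ICMPV6_PKT_TOOBIG branch above (editorial,
 * illustrative): if icsk_pmtu_cookie is 1500 and a router reports a
 * 1400-byte MTU, dst_mtu(dst) drops to 1400, so tcp_sync_mss() shrinks
 * the MSS and tcp_simple_retransmit() resends the queued segments that
 * no longer fit.
 */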

static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate a new list if the current one is full */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				kfree(newkey);
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
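
/* For reference (editorial, not from the original source): the
 * tcp_v6_parse_md5_keys() path above is reached from userspace via
 * setsockopt(TCP_MD5SIG).  A minimal sketch:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as handled above.
 */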
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
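
/* Editorial note: tcp6_gro_complete() below runs once GRO has merged
 * segments; it rewrites th->check as the negated pseudo-header sum over
 * the new, larger length and marks the skb SKB_GSO_TCPV6 so that output
 * segmentation regenerates correct per-segment checksums.
 */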
static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when the packet is
	 * a RST; the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
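
/* The seq/ack choice above follows RFC 793; a worked example (editorial):
 * for an incoming segment with ACK set, the RST carries seq = SEG.ACK so
 * the peer accepts it; otherwise the RST acks everything the segment
 * occupied, e.g. a bare SYN with seq 1000 draws an RST with ack_seq 1001.
 */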
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
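
/* Editorial note: tcp_v6_hnd_req() above resolves a non-SYN segment on a
 * listener to, in order: a pending request_sock (via tcp_check_req()), an
 * already established child socket, or, with CONFIG_SYN_COOKIES, a request
 * reconstructed by cookie_v6_check() from the ACK's sequence number alone.
 */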
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea.  We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive.  It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place.  Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach the optmem to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure.  If we fail to get
		 * memory, then we end up not copying the key
		 * across.  Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6.  An IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here.  Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   does not look very well thought out.  For now we latch
	   options, received in the last packet, enqueued
	   by tcp.  Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
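
/* Example of the seq_file output produced below (editorial, illustrative;
 * each address is printed as four %08X words of s6_addr32, so on a
 * little-endian machine a listener bound to [::1]:8080 shows up as
 *
 *	0: 00000000000000000000000001000000:1F90 ... 0A ...
 *
 * where 0A is TCP_LISTEN and 1F90 is port 8080 in hex).
 */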
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ?
			sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
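/* Reading aid (field values invented for illustration): a line emitted
 * by get_tcp6_sock() for a listener on port 22 (0x0016) could look like
 *
 *	0: 00000000000000000000000000000000:0016 ... 0A 00000000:00000000 ...
 *
 * where each address is printed as four 32-bit words in hex, 'st' is
 * the TCP state (0A == TCP_LISTEN == 10), and the 'tr' field encodes
 * which timer is pending: 1 retransmit, 2 keepalive/sk_timer, 3
 * TIME_WAIT (see get_timewait6_sock below) and 4 zero-window probe,
 * matching the timer_active values assigned above.
 */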
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
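/* Illustrative note: a plain socket(AF_INET6, SOCK_STREAM, 0) from
 * userspace lands on this proto table; inet6_create() matches the
 * (SOCK_STREAM, IPPROTO_TCP) pair against tcpv6_protosw below and
 * installs tcpv6_prot, so e.g. connect(2) ends up in tcp_v6_connect()
 * and close(2) in tcp_close().
 */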
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Unwind in reverse order of registration: the protosw is
	 * unregistered before the protocol handler is removed, so each
	 * failure path only undoes the steps that actually succeeded.
	 */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
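/* Illustrative note: tcpv6_init()/tcpv6_exit() are not a standalone
 * module pair; they are invoked from the IPv6 core during its own
 * initialization (inet6_init() in net/ipv6/af_inet6.c), roughly in
 * this shape (label name hypothetical):
 *
 *	err = tcpv6_init();
 *	if (err)
 *		goto tcp_fail;
 */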