/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}
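/*
 * Active open.  Resolve the destination, choose a source address and
 * route, allocate a local port and send the initial SYN.  Note the
 * special cases handled below: flow label lookup when SNDFLOW is set,
 * the BSD'ism that connect() to the any-address means loopback, and
 * v4-mapped destinations, which are handed over to the IPv4 code with
 * the socket's af_ops switched to ipv6_mapped.
 */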
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

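	/*
	 * Build the flow description for the route lookup.  When the
	 * socket carries a type 0 routing header, the lookup is done
	 * towards the first hop listed in it, and the real destination
	 * is restored into fl6_dst afterwards via final_p.
	 */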
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

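	/*
	 * Path MTU discovery: ICMPV6_PKT_TOOBIG means a router on the
	 * path cannot forward our full-sized segments.  Sync the MSS to
	 * the new path MTU and retransmit whatever no longer fits;
	 * every other error type is left to the handling further down.
	 */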
	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

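/*
 * Send a SYN|ACK for a pending connection request.  If the caller did
 * not supply a route, one is looked up here, honouring a source
 * routing header taken either from the listener's options or from the
 * options that arrived with the SYN.
 */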
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};

static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff << 2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}

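/*
 * Build and send a RST for a segment that has no socket.  The reply
 * mirrors the offending segment: if it carried an ACK, that ACK
 * number becomes our sequence number; otherwise we ACK every byte the
 * segment occupied so the peer can match the reset to it.
 */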
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += 3*4;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32 *)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

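/*
 * Passive open: queue a request_sock for an incoming SYN and answer
 * it with a SYN|ACK.  The SYN's skb is pinned as pktopts when the
 * user asked for received options, so they can be replayed to the
 * child socket once the handshake completes.
 */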
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

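/*
 * Create the child socket for a completed handshake.  A v4-mapped
 * peer is handled by the IPv4 code and only dressed up with mapped
 * addresses afterwards; a native IPv6 peer gets a route lookup and a
 * full copy of the listener's IPv6 state.
 */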
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but doing so would need one more change: reattaching optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

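/*
 * Receive checksum setup.  With hardware checksums the pseudo-header
 * sum can be verified right away; otherwise skb->csum is primed with
 * the pseudo-header sum so that short packets (here, len <= 76) are
 * verified immediately and larger ones can be finished later by
 * tcp_checksum_complete().
 */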
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out.  For now we latch
	   the options received in the last packet enqueued
	   by tcp.  Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

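	/*
	 * Fast path: an established socket hands the segment straight
	 * to tcp_rcv_established(); everything else goes through the
	 * full state machine below.
	 */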
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* We latch the received options only when:

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants
	      to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

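/*
 * Main receive routine, called for every TCP/IPv6 segment.  Validate
 * the header and checksum, look up the owning socket and either
 * process the segment directly, prequeue it, or park it on the
 * backlog when the socket is locked by a user context.
 */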
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

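/*
 * Two sets of af_ops: ipv6_specific drives native IPv6 traffic, while
 * ipv6_mapped is installed on sockets that connect to v4-mapped
 * addresses and routes all output through the IPv4 code.
 */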
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	TCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
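/*
 * Dump one open request in /proc/net/tcp6 format: address pair, the
 * pseudo-state TCP_SYN_RECV and the time left until the SYN|ACK
 * retransmit timer expires.
 */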
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0,  /* open_requests have no inode */
		   0, req);
}

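/*
 * Dump a full socket.  The timer_active codes mirror the IPv4 dump:
 * 1 retransmit timer, 2 sk_timer (e.g. keepalive) pending, 4 zero
 * window probe timer, 0 nothing pending.
 */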
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

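/*
 * Everything below wires TCPv6 into the stack: the proto structure
 * exported to the socket layer, the inet6 protocol handler for
 * incoming segments and errors, and the protosw entry registered by
 * tcpv6_init().
 */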
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}