/*
 *	DCCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* Socket used for sending RSTs and ACKs */
static struct socket *dccp_v6_ctl_socket;

static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
				   struct request_sock *req);
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);

static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			dccp_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&dccp_hashinfo, sk);
		local_bh_enable();
	}
}

static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
				struct in6_addr *saddr,
				struct in6_addr *daddr,
				unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
}

static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	if (skb->protocol == htons(ETH_P_IPV6))
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    dh->dccph_dport,
						    dh->dccph_sport);

	return secure_dccp_sequence_number(skb->nh.iph->daddr,
					   skb->nh.iph->saddr,
					   dh->dccph_dport,
					   dh->dccph_sport);
}

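/*
 * Connect a DCCPv6 socket to the peer given in uaddr: validate the address,
 * hand IPv4-mapped destinations over to dccp_v4_connect(), route the flow,
 * choose the source address and local port, and finally issue the
 * DCCP-Request via dccp_connect().
 */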
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

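	/*
	 * Build the flow key for the native IPv6 case and route it.  If a
	 * type 0 routing header is configured, route towards its first hop
	 * and restore the final destination only once the route is known.
	 */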
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt != NULL && np->opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = xfrm_lookup(&dst, &fl, sk, 0);
	if (err < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;
	/* FIXME */
#if 0
	dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
						       np->daddr.s6_addr32,
						       inet->sport,
						       inet->dport);
#endif
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;

	sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			err = ip6_dst_lookup(sk, &dst, &fl);
			if (err) {
				sk->sk_err_soft = -err;
				goto out;
			}

			err = xfrm_lookup(&dst, &fl, sk, 0);
			if (err < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	seq = DCCP_SKB_CB(skb)->dccpd_seq;
	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND: /* Cannot happen.
			      It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = ireq6->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    ireq6->pktopts) {
			struct sk_buff *pktopts = ireq6->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);

			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk,
					(struct ipv6_rt_hdr *)(pktopts->nh.raw +
							       rxopt->srcrt));
		}

		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		err = xfrm_lookup(&dst, &fl, sk, 0);
		if (err < 0)
			goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_check(dh, skb->len,
						   &ireq6->loc_addr,
						   &ireq6->rmt_addr,
						   csum_partial((char *)dh,
								skb->len,
								skb->csum));
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts != NULL)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_v6_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
					     len, IPPROTO_DCCP,
					     csum_partial((char *)dh,
							  dh->dccph_doff << 2,
							  skb->csum));
}

static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct flowi fl;
	u64 seqno;

	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_reset_len);

	/* Swap the send and the receive. */
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;
	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
	seqno = 0;
	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
	dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
					     sizeof(*dh), IPPROTO_DCCP,
					     skb->csum);
	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(skb);
}

static void dccp_v6_reqsk_send_ack(struct sk_buff *rxskb,
				   struct request_sock *req)
{
	struct flowi fl;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const u32 dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
	struct sk_buff *skb;

	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_ack_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_ACK;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_ack_len / 4;
	dh->dccph_x	= 1;

	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

	/* FIXME: calculate checksum, IPv4 also should... */

	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(skb);
}

static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = skb->nh.ipv6h;
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

	return sk;
}

static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct dccp_sock dp;
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (dccp_bad_service_code(sk, service)) {
		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
	if (req == NULL)
		goto drop;

	/* FIXME: process options */

	dccp_openreq_init(req, &dp, skb);

	ireq6 = inet6_rsk(req);
	ireq = inet_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
	req->rcv_wnd	= dccp_feat_default_sequence_window;
	ireq6->pktopts	= NULL;

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(sk, skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	dcb->dccpd_reset_code = reset_code;
	return -1;
}

static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newdp = dccp_sk(newsk);
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place: until this moment the IPv4-mapped
		   socket was handled with the IPv6 icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
		const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);

		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk,
				(struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
						       rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newdp = dccp_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/*
	 * Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&dccp_hashinfo, newsk);
	inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6 but an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always ends up here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 * Socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet, as enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		return 0;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	return 0;

reset:
	dccp_v6_ctl_send_reset(skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

static int dccp_v6_rcv(struct sk_buff **pskb)
{
	const struct dccp_hdr *dh;
	struct sk_buff *skb = *pskb;
	struct sock *sk;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
			    dh->dccph_sport,
			    &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
			    inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk == NULL)
		goto no_dccp_socket;

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(skb);
	}
discard_it:
	/*
	 * Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	inet_twsk_put((struct inet_timewait_sock *)sk);
	goto no_dccp_socket;
}

static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	= inet6_csk_xmit,
	.send_check	= dccp_v6_send_check,
	.rebuild_header	= inet6_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct ipv6hdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 * DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= dccp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static int dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = dccp_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = dccp_v6_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static struct inet6_protocol dccp_v6_protocol = {
	.handler	= dccp_v6_rcv,
	.err_handler	= dccp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
	.family		= PF_INET6,
	.owner		= THIS_MODULE,
	.release	= inet6_release,
	.bind		= inet6_bind,
	.connect	= inet_stream_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= inet_accept,
	.getname	= inet6_getname,
	.poll		= dccp_poll,
	.ioctl		= inet6_ioctl,
	.listen		= inet_dccp_listen,
	.shutdown	= inet_shutdown,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.sendmsg	= inet_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.capability	= -1,
	.flags		= INET_PROTOSW_ICSK,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
				       SOCK_DCCP, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_protosw;
out:
	return err;
out_unregister_protosw:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly.  Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");