/*
 * net/dccp/proto.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]     = "DATA",
		[DCCP_PKT_ACK]      = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]     = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
		[DCCP_OPEN]             = "OPEN",
		[DCCP_REQUESTING]       = "REQUESTING",
		[DCCP_PARTOPEN]         = "PARTOPEN",
		[DCCP_LISTEN]           = "LISTEN",
		[DCCP_RESPOND]          = "RESPOND",
		[DCCP_CLOSING]          = "CLOSING",
		[DCCP_ACTIVE_CLOSEREQ]  = "CLOSEREQ",
		[DCCP_PASSIVE_CLOSE]    = "PASSIVE_CLOSE",
		[DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
		[DCCP_TIME_WAIT]        = "TIME_WAIT",
		[DCCP_CLOSED]           = "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}

EXPORT_SYMBOL_GPL(dccp_state_name);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto = DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries = sysctl_dccp_request_retries;
	sk->sk_state = DCCP_CLOSED;
	sk->sk_write_space = dccp_write_space;
	icsk->icsk_sync_mss = dccp_sync_mss;
	dp->dccps_mss_cache = 536;
	dp->dccps_rate_last = jiffies;
	dp->dccps_role = DCCP_ROLE_UNDEFINED;
	dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (sysctl_dccp_tx_qlen &&
	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process() works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	skb_queue_tail(&sk->sk_write_queue, skb);
	dccp_write_xmit(sk, 0);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb, 0);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb, 0);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int dccp_mib_init(void)
{
	return snmp_mib_init((void __percpu **)dccp_statistics,
			     sizeof(struct dccp_mib),
			     __alignof__(struct dccp_mib));
}

static inline void dccp_mib_exit(void)
{
	snmp_mib_free((void __percpu **)dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
int dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
	}

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");