/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */

#include <linux/config.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * Convert a ROSE address into text.
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
			addr->rose_addr[1] & 0xFF,
			addr->rose_addr[2] & 0xFF,
			addr->rose_addr[3] & 0xFF,
			addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given device.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		init_timer(&sk->sk_timer);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = rose_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
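
/*
 * Move a bound socket into the listening state: clear out any stale
 * destination information and record the backlog requested by the caller.
 */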
static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name = "ROSE",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}
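
/*
 * Create the sock for an incoming call, copying the socket type, buffer
 * sizes, timer values and ROSE options across from the listening socket.
 */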
static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type = osk->sk_type;
	sk->sk_socket = osk->sk_socket;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf = osk->sk_rcvbuf;
	sk->sk_sndbuf = osk->sk_sndbuf;
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_sleep = osk->sk_sleep;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1 = orose->t1;
	rose->t2 = orose->t2;
	rose->t3 = orose->t3;
	rose->hb = orose->hb;
	rose->idle = orose->idle;
	rose->defer = orose->defer;
	rose->device = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

	user = ax25_findbyuid(current->euid);
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr = addr->srose_addr;
	rose->device = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n;

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		return 0;	/* Connect completed during an ERESTARTSYS event */
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		return -ECONNREFUSED;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		return -EISCONN;	/* No reconnect on a seqpacket socket */

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic);
	if (!rose->neighbour)
		return -ENETUNREACH;

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci)
		return -ENETUNREACH;

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL)
			return -ENETUNREACH;

		user = ax25_findbyuid(current->euid);
		if (!user)
			return -EINVAL;

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}

	rose->dest_addr = addr->srose_addr;
	rose->dest_call = addr->srose_call;
	rose->rand = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		return -EINPROGRESS;

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		struct task_struct *tsk = current;
		DECLARE_WAITQUEUE(wait, tsk);

		add_wait_queue(sk->sk_sleep, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(tsk)) {
				schedule();
				continue;
			}
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -ERESTARTSYS;
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sk_sleep, &wait);
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		return sock_error(sk);	/* Always set at this point */
	}

	sock->state = SS_CONNECTED;

	return 0;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct sk_buff *skb;
	struct sock *newsk;
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * The write queue this time is holding sockets ready to use
	 * hooked into the SABM we saved
	 */
	add_wait_queue(sk->sk_sleep, &wait);
	for (;;) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		current->state = TASK_INTERRUPTIBLE;
		release_sock(sk);
		if (flags & O_NONBLOCK) {
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -EWOULDBLOCK;
		}
		if (!signal_pending(tsk)) {
			schedule();
			lock_sock(sk);
			continue;
		}
		return -ERESTARTSYS;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sk_sleep, &wait);

	newsk = skb->sk;
	newsk->sk_socket = newsock;
	newsk->sk_sleep = &newsock->wait;

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->sk = newsk;

out:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->source_addr;
		srose->srose_call = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
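	/*
	 * Byte 3 of the frame carries the two address digit counts (one
	 * nibble each). The digits are packed two per octet, so the
	 * rounded-up sum computed below is the length of the address block
	 * that sits between the four byte header and the facilities field.
	 */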
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	len = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	make_rose->lci = lci;
	make_rose->dest_addr = facilities.dest_addr;
	make_rose->dest_call = facilities.dest_call;
	make_rose->dest_ndigis = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr = facilities.source_addr;
	make_rose->source_call = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour = neigh;
	make_rose->device = dev;
	make_rose->facilities = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs = 0;
	make_rose->va = 0;
	make_rose->vr = 0;
	make_rose->vl = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr = rose->dest_addr;
		srose.srose_call = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	asmptr = skb->h.raw = skb_put(skb, len);

	err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}
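
	/*
	 * With M bit support compiled in, payloads longer than ROSE_PACLEN
	 * are split into ROSE_PACLEN sized fragments and the M (more data)
	 * bit is set on every fragment except the last.
	 */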
#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		memcpy(header, skb->data, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk = sk;
			skbn->free = 1;
			skbn->arp = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			memcpy(skb_put(skbn, lg), skb->data, lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			memcpy(skbn->data, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);	/* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}

static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	if (rose->qbitincl) {
		asmptr = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb->h.raw = skb->data;
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}

static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;
		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *) argp);

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_register(&rose_callsign, NULL);
		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs = 0;
			rose->va = 0;
			rose->vr = 0;
			rose->vl = 0;
			rose->state = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
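
/*
 * Support for /proc/net/rose: walk the list of bound sockets under
 * rose_list_lock, emitting the header line for SEQ_START_TOKEN and one
 * line of state per socket thereafter.
 */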
#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   atomic_read(&s->sk_wmem_alloc),
			   atomic_read(&s->sk_rmem_alloc),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family = PF_ROSE,
	.owner = THIS_MODULE,
	.release = rose_release,
	.bind = rose_bind,
	.connect = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept = rose_accept,
	.getname = rose_getname,
	.poll = datagram_poll,
	.ioctl = rose_ioctl,
	.listen = rose_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg = rose_sendmsg,
	.recvmsg = rose_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static int __init rose_proto_init(void)
{
	int i;
	int rc;

	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
		rc = -EINVAL;
		goto out;
	}

	rc = proto_register(&rose_proto, 0);
	if (rc != 0)
		goto out;

	rose_callsign = null_ax25_address;

	dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_rose == NULL) {
		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
		rc = -ENOMEM;
		goto out_proto_unregister;
	}

	memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev;
		char name[IFNAMSIZ];

		sprintf(name, "rose%d", i);
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   name, rose_setup);
		if (!dev) {
			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
			rc = -ENOMEM;
			goto fail;
		}
		rc = register_netdev(dev);
		if (rc) {
			printk(KERN_ERR "ROSE: netdevice registration failed\n");
			free_netdev(dev);
			goto fail;
		}
		dev_rose[i] = dev;
	}

	sock_register(&rose_family_ops);
	register_netdevice_notifier(&rose_dev_notifier);

	ax25_protocol_register(AX25_P_ROSE, rose_route_frame);
	ax25_linkfail_register(rose_link_failed);

#ifdef CONFIG_SYSCTL
	rose_register_sysctl();
#endif
	rose_loopback_init();

	rose_add_loopback_neigh();

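	/* Publish sockets, neighbours, nodes and routes under /proc/net. */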
	proc_net_fops_create("rose", S_IRUGO, &rose_info_fops);
	proc_net_fops_create("rose_neigh", S_IRUGO, &rose_neigh_fops);
	proc_net_fops_create("rose_nodes", S_IRUGO, &rose_nodes_fops);
	proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops);
out:
	return rc;
fail:
	while (--i >= 0) {
		unregister_netdev(dev_rose[i]);
		free_netdev(dev_rose[i]);
	}
	kfree(dev_rose);
out_proto_unregister:
	proto_unregister(&rose_proto);
	goto out;
}
module_init(rose_proto_init);

module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);

static void __exit rose_exit(void)
{
	int i;

	proc_net_remove("rose");
	proc_net_remove("rose_neigh");
	proc_net_remove("rose_nodes");
	proc_net_remove("rose_routes");
	rose_loopback_clear();

	rose_rt_free();

	ax25_protocol_release(AX25_P_ROSE);
	ax25_linkfail_release(rose_link_failed);

	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
		ax25_listen_release(&rose_callsign, NULL);

#ifdef CONFIG_SYSCTL
	rose_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&rose_dev_notifier);

	sock_unregister(PF_ROSE);

	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);