/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static const struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * ROSE network devices are virtual network devices encapsulating ROSE
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;

static void rose_set_lockdep_one(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}

static void rose_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
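
/*
 * Editor's note (summary of the helpers below): a rose_address holds
 * ROSE_ADDR_LEN (5) packed BCD octets which rose2asc() renders as ten hex
 * digits, so callers need a buffer of at least 11 bytes; the all-zero
 * address is printed as "*". As a purely hypothetical example, the octets
 * 0x20 0x80 0x19 0x20 0x30 would be shown as "2080192030".
 */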

/*
 * Convert a ROSE address into text.
 */
char *rose2asc(char *buf, const rose_address *addr)
{
	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buf, "*");
	} else {
		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
						     addr->rose_addr[1] & 0xFF,
						     addr->rose_addr[2] & 0xFF,
						     addr->rose_addr[3] & 0xFF,
						     addr->rose_addr[4] & 0xFF);
	}

	return buf;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	unsigned int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}
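
/*
 * Note on listener matching (describing rose_find_listener() below): an
 * incoming Call Request is first matched against listeners bound to the
 * called ROSE address and the exact called AX.25 callsign (and bound
 * without digipeaters); if none matches, a second pass accepts any
 * listener on that address that was bound with the null callsign as a
 * wildcard.
 */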

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given device.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		setup_timer(&sk->sk_timer, rose_destroy_timer,
			    (unsigned long)sk);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
 */
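
/*
 * Illustrative userspace use of the socket options handled below (values
 * assumed, not taken from real traffic): options are plain ints at level
 * SOL_ROSE, the timer options are in seconds, ROSE_IDLE is in minutes and
 * ROSE_DEFER/ROSE_QBITINCL are boolean flags, e.g.
 *
 *	int t1 = 30;
 *	setsockopt(fd, SOL_ROSE, ROSE_T1, &t1, sizeof(t1));
 */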

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
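
/*
 * Note: there is no separate accept queue. rose_rx_call_request() parks
 * each pending Call Request skb, with skb->sk pointing at the newly made
 * socket, on the listener's sk_receive_queue (bounded by the backlog set
 * here), and rose_accept() dequeues from there.
 */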

static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name = "ROSE",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}

static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf = osk->sk_rcvbuf;
	sk->sk_sndbuf = osk->sk_sndbuf;
	sk->sk_state = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1 = orose->t1;
	rose->t2 = orose->t2;
	rose->t3 = orose->t3;
	rose->hb = orose->hb;
	rose->idle = orose->idle;
	rose->defer = orose->defer;
	rose->device = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}
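
/*
 * Note on bind: srose_addr must be the address of a local rose device
 * (rose_dev_get() below), and the source callsign is normally taken from
 * the calling user's AX.25 UID mapping; the callsign supplied in the
 * sockaddr is only used when no mapping exists, subject to ax25_uid_policy
 * and CAP_NET_BIND_SERVICE.
 */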

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
		return -EADDRNOTAVAIL;

	source = &addr->srose_call;

	user = ax25_findbyuid(current_euid());
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr = addr->srose_addr;
	rose->device = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n, err = 0;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during a ERESTARTSYS event */
		sock->state = SS_CONNECTED;
		goto out_release;
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out_release;
	}

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;
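
	/*
	 * Call setup: find a route/neighbour for the destination, allocate
	 * a free LCI on that neighbour, autobind if the socket was never
	 * bound, then send a CALL REQUEST and run T1 while waiting for the
	 * call to be accepted.
	 */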
	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic, 0);
	if (!rose->neighbour) {
		err = -ENETUNREACH;
		goto out_release;
	}

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci) {
		err = -ENETUNREACH;
		goto out_release;
	}

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL) {
			err = -ENETUNREACH;
			goto out_release;
		}

		user = ax25_findbyuid(current_euid());
		if (!user) {
			err = -EINVAL;
			goto out_release;
		}

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}
	rose->dest_addr = addr->srose_addr;
	rose->dest_call = addr->srose_call;
	rose->rand = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk_sleep(sk), &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 * The receive queue this time is holding sockets ready to use
	 * hooked into the Call Request we saved
	 */
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err)
		goto out_release;

	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;

out_release:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	memset(srose, 0, sizeof(*srose));
	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->source_addr;
		srose->srose_call = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
				   &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	make_rose->lci = lci;
	make_rose->dest_addr = facilities.dest_addr;
	make_rose->dest_call = facilities.dest_call;
	make_rose->dest_ndigis = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr = facilities.source_addr;
	make_rose->source_call = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour = neigh;
	make_rose->device = dev;
	make_rose->facilities = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs = 0;
	make_rose->va = 0;
	make_rose->vr = 0;
	make_rose->vl = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr = rose->dest_addr;
		srose.srose_call = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}
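
	/*
	 * Data packets built below carry the three byte ROSE header:
	 * byte 0 holds the GFI (and the Q bit when set) in the high nibble
	 * and LCI bits 11-8 in the low nibble, byte 1 holds LCI bits 7-0,
	 * and byte 2 is the packet type (ROSE_DATA), followed by user data.
	 */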
	/* Build a packet */
	/* Sanity check the packet size */
	if (len > 65535)
		return -EMSGSIZE;

	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk = sk;
			skbn->free = 1;
			skbn->arp = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);	/* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}
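
/*
 * Note on the Q bit (both directions): with the ROSE_QBITINCL socket
 * option set, rose_sendmsg() above consumes the first byte of the user
 * data as the logical Q bit value, and rose_recvmsg() below prepends one
 * byte carrying the received Q bit before copying the data to userspace.
 */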

static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	if (rose->qbitincl) {
		asmptr = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}


static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *) argp);

	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *) argp);

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);

		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
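
	/*
	 * SIOCRSACCEPT completes a call that was left pending because the
	 * listening socket had ROSE_DEFER set: rose_rx_call_request() parks
	 * such calls in ROSE_STATE_5 and no CALL ACCEPTED goes out until
	 * the application issues this ioctl.
	 */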
	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs = 0;
			rose->va = 0;
			rose->vr = 0;
			rose->vl = 0;
			rose->state = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	spin_lock_bh(&rose_list_lock);
	return seq_hlist_start_head(&rose_list, *pos);
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &rose_list, pos);
}

static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11], rsbuf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = sk_entry(v);
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(rsbuf, &rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(rsbuf, &rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   sk_wmem_alloc_get(s),
			   sk_rmem_alloc_get(s),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static const struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static const struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static const struct proto_ops rose_proto_ops = {
	.family = PF_ROSE,
	.owner = THIS_MODULE,
	.release = rose_release,
	.bind = rose_bind,
	.connect = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept = rose_accept,
	.getname = rose_getname,
	.poll = datagram_poll,
	.ioctl = rose_ioctl,
	.listen = rose_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg = rose_sendmsg,
	.recvmsg = rose_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static struct ax25_protocol rose_pid = {
	.pid = AX25_P_ROSE,
	.func = rose_route_frame
};

static struct ax25_linkfail rose_linkfail_notifier = {
	.func = rose_link_failed
};
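
/*
 * Module load (illustrative, assumed command line): the number of rose%d
 * devices created below is the rose_ndevs module parameter, e.g.
 *
 *	modprobe rose rose_ndevs=4
 *
 * The default is 10 virtual devices (rose0 .. rose9).
 */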
proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); 1583 out: 1584 return rc; 1585 fail: 1586 while (--i >= 0) { 1587 unregister_netdev(dev_rose[i]); 1588 free_netdev(dev_rose[i]); 1589 } 1590 kfree(dev_rose); 1591 out_proto_unregister: 1592 proto_unregister(&rose_proto); 1593 goto out; 1594 } 1595 module_init(rose_proto_init); 1596 1597 module_param(rose_ndevs, int, 0); 1598 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); 1599 1600 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); 1601 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol"); 1602 MODULE_LICENSE("GPL"); 1603 MODULE_ALIAS_NETPROTO(PF_ROSE); 1604 1605 static void __exit rose_exit(void) 1606 { 1607 int i; 1608 1609 proc_net_remove(&init_net, "rose"); 1610 proc_net_remove(&init_net, "rose_neigh"); 1611 proc_net_remove(&init_net, "rose_nodes"); 1612 proc_net_remove(&init_net, "rose_routes"); 1613 rose_loopback_clear(); 1614 1615 rose_rt_free(); 1616 1617 ax25_protocol_release(AX25_P_ROSE); 1618 ax25_linkfail_release(&rose_linkfail_notifier); 1619 1620 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) 1621 ax25_listen_release(&rose_callsign, NULL); 1622 1623 #ifdef CONFIG_SYSCTL 1624 rose_unregister_sysctl(); 1625 #endif 1626 unregister_netdevice_notifier(&rose_dev_notifier); 1627 1628 sock_unregister(PF_ROSE); 1629 1630 for (i = 0; i < rose_ndevs; i++) { 1631 struct net_device *dev = dev_rose[i]; 1632 1633 if (dev) { 1634 unregister_netdev(dev); 1635 free_netdev(dev); 1636 } 1637 } 1638 1639 kfree(dev_rose); 1640 proto_unregister(&rose_proto); 1641 } 1642 1643 module_exit(rose_exit); 1644