/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * ROSE network devices are virtual network devices encapsulating ROSE
 * frames into AX.25 which will be sent through an AX.25 device, so form a
 * special "super class" of normal net devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key rose_netdev_xmit_lock_key;
static struct lock_class_key rose_netdev_addr_lock_key;

static void rose_set_lockdep_one(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}

static void rose_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}

/*
 * Convert a ROSE address into text.
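 *
 * A ROSE address is five BCD-packed bytes (up to ten digits); the
 * all-zero address is printed as "*".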
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
			addr->rose_addr[1] & 0xFF,
			addr->rose_addr[2] & 0xFF,
			addr->rose_addr[3] & 0xFF,
			addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare two ROSE addresses for only mask digits, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	unsigned int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			if (rose->neighbour)
				rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
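 * Prefer a listener bound to the matching callsign; failing that,
 * fall back to one bound to the null (wildcard) callsign.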
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and device.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given device.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers */
		setup_timer(&sk->sk_timer, rose_destroy_timer,
			    (unsigned long)sk);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
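 *
 * A minimal sketch of typical user space usage (assuming <linux/rose.h>
 * and an already created PF_ROSE socket "fd"); the timer options below
 * take whole seconds and are converted to jiffies internally:
 *
 *	int t1 = 30;
 *	setsockopt(fd, SOL_ROSE, ROSE_T1, &t1, sizeof(t1));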
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}

static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name = "ROSE",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}

static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
	if (sk == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf = osk->sk_rcvbuf;
	sk->sk_sndbuf = osk->sk_sndbuf;
	sk->sk_state = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1 = orose->t1;
	rose->t2 = orose->t2;
	rose->t3 = orose->t3;
	rose->hb = orose->hb;
	rose->idle = orose->idle;
	rose->defer = orose->defer;
	rose->device = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

	user = ax25_findbyuid(current_euid());
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		rose->source_call = *source;
	}

	rose->source_addr = addr->srose_addr;
	rose->device = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	ax25_uid_assoc *user;
	int n, err = 0;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during an ERESTARTSYS event */
		sock->state = SS_CONNECTED;
		goto out_release;
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out_release;
	}

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic, 0);
	if (!rose->neighbour) {
		err = -ENETUNREACH;
		goto out_release;
	}

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci) {
		err = -ENETUNREACH;
		goto out_release;
	}

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL) {
			err = -ENETUNREACH;
			goto out_release;
		}

		user = ax25_findbyuid(current_euid());
		if (!user) {
			err = -EINVAL;
			goto out_release;
		}

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device = dev;
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}
rose_try_next_neigh:
	rose->dest_addr = addr->srose_addr;
	rose->dest_call = addr->srose_call;
	rose->rand = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
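	 *
	 * A non-blocking connect has already returned -EINPROGRESS above
	 * and is expected to poll for completion; the wait loop below is
	 * only entered for blocking sockets.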
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(sk->sk_sleep, &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk->sk_sleep, &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Try next neighbour */
		rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
		if (rose->neighbour)
			goto rose_try_next_neigh;

		/* No more neighbours */
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 * The receive queue this time is holding sockets ready to use
	 * hooked into the Call Request we saved
	 */
	for (;;) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk->sk_sleep, &wait);
	if (err)
		goto out_release;

	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;

out_release:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->source_addr;
		srose->srose_call = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
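	/*
	 * Byte 3 carries the called and calling address digit counts;
	 * each address occupies (digits + 1) / 2 bytes, and the
	 * facilities field follows the two address blocks.
	 */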
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	make_rose->lci = lci;
	make_rose->dest_addr = facilities.dest_addr;
	make_rose->dest_call = facilities.dest_call;
	make_rose->dest_ndigis = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr = facilities.source_addr;
	make_rose->source_call = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour = neigh;
	make_rose->device = dev;
	make_rose->facilities = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs = 0;
	make_rose->va = 0;
	make_rose->vr = 0;
	make_rose->vl = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr = rose->dest_addr;
		srose.srose_call = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
	/* Sanity check the packet size */
	if (len > 65535)
		return -EMSGSIZE;

	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk = sk;
			skbn->free = 1;
			skbn->arp = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}


static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	if (rose->qbitincl) {
		asmptr = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb_reset_transport_header(skb);
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}


static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *) argp);

	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *) argp);

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);

		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs = 0;
			rose->va = 0;
			rose->vr = 0;
			rose->vl = 0;
			rose->state = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   sk_wmem_alloc_get(s),
			   sk_rmem_alloc_get(s),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static const struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family = PF_ROSE,
	.owner = THIS_MODULE,
	.release = rose_release,
	.bind = rose_bind,
	.connect = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept = rose_accept,
	.getname = rose_getname,
	.poll = datagram_poll,
	.ioctl = rose_ioctl,
	.listen = rose_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg = rose_sendmsg,
	.recvmsg = rose_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static struct ax25_protocol rose_pid = {
	.pid = AX25_P_ROSE,
	.func = rose_route_frame
};

static struct ax25_linkfail rose_linkfail_notifier = {
	.func = rose_link_failed
};

static int __init rose_proto_init(void)
{
	int i;
	int rc;

	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
		rc = -EINVAL;
		goto out;
	}

	rc = proto_register(&rose_proto, 0);
	if (rc != 0)
		goto out;

	rose_callsign = null_ax25_address;

	dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_rose == NULL) {
		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
		rc = -ENOMEM;
		goto out_proto_unregister;
	}

	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev;
		char name[IFNAMSIZ];

		sprintf(name, "rose%d", i);
		dev = alloc_netdev(0, name, rose_setup);
		if (!dev) {
			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
			rc = -ENOMEM;
			goto fail;
		}
		rc = register_netdev(dev);
		if (rc) {
			printk(KERN_ERR "ROSE: netdevice registration failed\n");
			free_netdev(dev);
			goto fail;
		}
		rose_set_lockdep_key(dev);
		dev_rose[i] = dev;
	}

	sock_register(&rose_family_ops);
	register_netdevice_notifier(&rose_dev_notifier);

	ax25_register_pid(&rose_pid);
	ax25_linkfail_register(&rose_linkfail_notifier);

#ifdef CONFIG_SYSCTL
	rose_register_sysctl();
#endif
	rose_loopback_init();

	rose_add_loopback_neigh();

	proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
	proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
	proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
	proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
out:
	return rc;
fail:
	while (--i >= 0) {
		unregister_netdev(dev_rose[i]);
		free_netdev(dev_rose[i]);
	}
	kfree(dev_rose);
out_proto_unregister:
	proto_unregister(&rose_proto);
	goto out;
}
module_init(rose_proto_init);

module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);

static void __exit rose_exit(void)
{
	int i;

	proc_net_remove(&init_net, "rose");
	proc_net_remove(&init_net, "rose_neigh");
	proc_net_remove(&init_net, "rose_nodes");
	proc_net_remove(&init_net, "rose_routes");
	rose_loopback_clear();

	rose_rt_free();

	ax25_protocol_release(AX25_P_ROSE);
	ax25_linkfail_release(&rose_linkfail_notifier);

	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
		ax25_listen_release(&rose_callsign, NULL);

#ifdef CONFIG_SYSCTL
	rose_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&rose_dev_notifier);

	sock_unregister(PF_ROSE);

	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);