// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/arp.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
#include <linux/export.h>

static unsigned int nr_neigh_no = 1;

static HLIST_HEAD(nr_node_list);
static DEFINE_SPINLOCK(nr_node_list_lock);
static HLIST_HEAD(nr_neigh_list);
static DEFINE_SPINLOCK(nr_neigh_list_lock);

static struct nr_node *nr_node_get(ax25_address *callsign)
{
	struct nr_node *found = NULL;
	struct nr_node *nr_node;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list)
		if (ax25cmp(callsign, &nr_node->callsign) == 0) {
			nr_node_hold(nr_node);
			found = nr_node;
			break;
		}
	spin_unlock_bh(&nr_node_list_lock);
	return found;
}

static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
					 struct net_device *dev)
{
	struct nr_neigh *found = NULL;
	struct nr_neigh *nr_neigh;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(nr_neigh, &nr_neigh_list)
		if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
		    nr_neigh->dev == dev) {
			nr_neigh_hold(nr_neigh);
			found = nr_neigh;
			break;
		}
	spin_unlock_bh(&nr_neigh_list_lock);
	return found;
}

static void nr_remove_neigh(struct nr_neigh *);

/* re-sort the routes in quality order. */
static void re_sort_routes(struct nr_node *nr_node, int x, int y)
{
	if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
		if (nr_node->which == x)
			nr_node->which = y;
		else if (nr_node->which == y)
			nr_node->which = x;

		swap(nr_node->routes[x], nr_node->routes[y]);
	}
}

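/*
 * Each nr_node keeps up to three candidate routes in routes[0..2],
 * held best-first by quality.  "count" is the number of valid entries
 * and "which" is the index of the route currently in use; it is
 * advanced past neighbours whose L2 link has failed (see
 * nr_link_failed()) and wound back when such a neighbour is heard
 * from again.
 */
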
/*
 * Add a new route to a node, and in the process add the node and the
 * neighbour if it is new.
 */
static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
	ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
	int quality, int obs_count)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i, found;
	struct net_device *odev;

	if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
		dev_put(odev);
		return -EINVAL;
	}

	nr_node = nr_node_get(nr);

	nr_neigh = nr_neigh_get_dev(ax25, dev);

	/*
	 * The L2 link to a neighbour has failed in the past
	 * and now a frame comes from this neighbour. We assume
	 * it was a temporary trouble with the link and reset the
	 * routes now (and not wait for a node broadcast).
	 */
	if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
		struct nr_node *nr_nodet;

		spin_lock_bh(&nr_node_list_lock);
		nr_node_for_each(nr_nodet, &nr_node_list) {
			nr_node_lock(nr_nodet);
			for (i = 0; i < nr_nodet->count; i++)
				if (nr_nodet->routes[i].neighbour == nr_neigh)
					if (i < nr_nodet->which)
						nr_nodet->which = i;
			nr_node_unlock(nr_nodet);
		}
		spin_unlock_bh(&nr_node_list_lock);
	}

	if (nr_neigh != NULL)
		nr_neigh->failed = 0;

	if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
		nr_neigh_put(nr_neigh);
		nr_node_put(nr_node);
		return 0;
	}

	if (nr_neigh == NULL) {
		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
			if (nr_node)
				nr_node_put(nr_node);
			return -ENOMEM;
		}

		nr_neigh->callsign = *ax25;
		nr_neigh->digipeat = NULL;
		nr_neigh->ax25 = NULL;
		nr_neigh->dev = dev;
		nr_neigh->quality = sysctl_netrom_default_path_quality;
		nr_neigh->locked = 0;
		nr_neigh->count = 0;
		nr_neigh->number = nr_neigh_no++;
		nr_neigh->failed = 0;
		refcount_set(&nr_neigh->refcount, 1);

		if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
			nr_neigh->digipeat = kmemdup(ax25_digi,
						     sizeof(*ax25_digi),
						     GFP_KERNEL);
			if (nr_neigh->digipeat == NULL) {
				kfree(nr_neigh);
				if (nr_node)
					nr_node_put(nr_node);
				return -ENOMEM;
			}
		}

		spin_lock_bh(&nr_neigh_list_lock);
		hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
		nr_neigh_hold(nr_neigh);
		spin_unlock_bh(&nr_neigh_list_lock);
	}

	if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
		nr_neigh->quality = quality;

	if (nr_node == NULL) {
		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
			if (nr_neigh)
				nr_neigh_put(nr_neigh);
			return -ENOMEM;
		}

		nr_node->callsign = *nr;
		strcpy(nr_node->mnemonic, mnemonic);

		nr_node->which = 0;
		nr_node->count = 1;
		refcount_set(&nr_node->refcount, 1);
		spin_lock_init(&nr_node->node_lock);

		nr_node->routes[0].quality = quality;
		nr_node->routes[0].obs_count = obs_count;
		nr_node->routes[0].neighbour = nr_neigh;

		nr_neigh_hold(nr_neigh);
		nr_neigh->count++;

		spin_lock_bh(&nr_node_list_lock);
		hlist_add_head(&nr_node->node_node, &nr_node_list);
		/* refcount initialized at 1 */
		spin_unlock_bh(&nr_node_list_lock);

		nr_neigh_put(nr_neigh);
		return 0;
	}
	nr_node_lock(nr_node);

	if (quality != 0)
		strcpy(nr_node->mnemonic, mnemonic);

	for (found = 0, i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_node->routes[i].quality = quality;
			nr_node->routes[i].obs_count = obs_count;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* We have space at the bottom, slot it in */
		if (nr_node->count < 3) {
			nr_node->routes[2] = nr_node->routes[1];
			nr_node->routes[1] = nr_node->routes[0];

			nr_node->routes[0].quality = quality;
			nr_node->routes[0].obs_count = obs_count;
			nr_node->routes[0].neighbour = nr_neigh;

			nr_node->which++;
			nr_node->count++;
			nr_neigh_hold(nr_neigh);
			nr_neigh->count++;
		} else {
			/* It must be better than the worst */
			if (quality > nr_node->routes[2].quality) {
				nr_node->routes[2].neighbour->count--;
				nr_neigh_put(nr_node->routes[2].neighbour);

				if (nr_node->routes[2].neighbour->count == 0 &&
				    !nr_node->routes[2].neighbour->locked)
					nr_remove_neigh(nr_node->routes[2].neighbour);

				nr_node->routes[2].quality = quality;
				nr_node->routes[2].obs_count = obs_count;
				nr_node->routes[2].neighbour = nr_neigh;

				nr_neigh_hold(nr_neigh);
				nr_neigh->count++;
			}
		}
	}

	/* Now re-sort the routes in quality order */
	switch (nr_node->count) {
	case 3:
		re_sort_routes(nr_node, 0, 1);
		re_sort_routes(nr_node, 1, 2);
		fallthrough;
	case 2:
		re_sort_routes(nr_node, 0, 1);
		break;
	case 1:
		break;
	}

	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			if (i < nr_node->which)
				nr_node->which = i;
			break;
		}
	}

	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);
	return 0;
}

static inline void __nr_remove_node(struct nr_node *nr_node)
{
	hlist_del_init(&nr_node->node_node);
	nr_node_put(nr_node);
}

#define nr_remove_node_locked(__node) \
	__nr_remove_node(__node)

static void nr_remove_node(struct nr_node *nr_node)
{
	spin_lock_bh(&nr_node_list_lock);
	__nr_remove_node(nr_node);
	spin_unlock_bh(&nr_node_list_lock);
}

static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	hlist_del_init(&nr_neigh->neigh_node);
	nr_neigh_put(nr_neigh);
}

#define nr_remove_neigh_locked(__neigh) \
	__nr_remove_neigh(__neigh)

static void nr_remove_neigh(struct nr_neigh *nr_neigh)
{
	spin_lock_bh(&nr_neigh_list_lock);
	__nr_remove_neigh(nr_neigh);
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * "Delete" a node. Strictly speaking remove a route to a node. The node
 * is only deleted if no routes are left to it.
 */
static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
{
	struct nr_node *nr_node;
	struct nr_neigh *nr_neigh;
	int i;

	nr_node = nr_node_get(callsign);

	if (nr_node == NULL)
		return -EINVAL;

	nr_neigh = nr_neigh_get_dev(neighbour, dev);

	if (nr_neigh == NULL) {
		nr_node_put(nr_node);
		return -EINVAL;
	}

	nr_node_lock(nr_node);
	for (i = 0; i < nr_node->count; i++) {
		if (nr_node->routes[i].neighbour == nr_neigh) {
			nr_neigh->count--;
			nr_neigh_put(nr_neigh);

			if (nr_neigh->count == 0 && !nr_neigh->locked)
				nr_remove_neigh(nr_neigh);
			nr_neigh_put(nr_neigh);

			nr_node->count--;

			if (nr_node->count == 0) {
				nr_remove_node(nr_node);
			} else {
				switch (i) {
				case 0:
					nr_node->routes[0] = nr_node->routes[1];
					fallthrough;
				case 1:
					nr_node->routes[1] = nr_node->routes[2];
					fallthrough;
				case 2:
					break;
				}
				nr_node_put(nr_node);
			}
			nr_node_unlock(nr_node);

			return 0;
		}
	}
	nr_neigh_put(nr_neigh);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return -EINVAL;
}

/*
 * Lock a neighbour with a quality.
 */
static int __must_check nr_add_neigh(ax25_address *callsign,
	ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);
	if (nr_neigh) {
		nr_neigh->quality = quality;
		nr_neigh->locked = 1;
		nr_neigh_put(nr_neigh);
		return 0;
	}

	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	nr_neigh->callsign = *callsign;
	nr_neigh->digipeat = NULL;
	nr_neigh->ax25 = NULL;
	nr_neigh->dev = dev;
	nr_neigh->quality = quality;
	nr_neigh->locked = 1;
	nr_neigh->count = 0;
	nr_neigh->number = nr_neigh_no++;
	nr_neigh->failed = 0;
	refcount_set(&nr_neigh->refcount, 1);

	if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
		nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
					     GFP_KERNEL);
		if (nr_neigh->digipeat == NULL) {
			kfree(nr_neigh);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&nr_neigh_list_lock);
	hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
	/* refcount is initialized at 1 */
	spin_unlock_bh(&nr_neigh_list_lock);

	return 0;
}

/*
 * "Delete" a neighbour. The neighbour is only removed if the number
 * of nodes that may use it is zero.
 */
static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
{
	struct nr_neigh *nr_neigh;

	nr_neigh = nr_neigh_get_dev(callsign, dev);

	if (nr_neigh == NULL) return -EINVAL;

	nr_neigh->quality = quality;
	nr_neigh->locked = 0;

	if (nr_neigh->count == 0)
		nr_remove_neigh(nr_neigh);
	nr_neigh_put(nr_neigh);

	return 0;
}

/*
 * Decrement the obsolescence count by one. If a route is reduced to a
 * count of zero, remove it. Also remove any unlocked neighbours with
 * zero nodes routing via them.
 */
static int nr_dec_obs(void)
{
	struct nr_neigh *nr_neigh;
	struct nr_node *s;
	struct hlist_node *nodet;
	int i;

	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(s, nodet, &nr_node_list) {
		nr_node_lock(s);
		for (i = 0; i < s->count; i++) {
			switch (s->routes[i].obs_count) {
			case 0: /* A locked entry */
				break;

			case 1: /* From 1 -> 0 */
				nr_neigh = s->routes[i].neighbour;

				nr_neigh->count--;
				nr_neigh_put(nr_neigh);

				if (nr_neigh->count == 0 && !nr_neigh->locked)
					nr_remove_neigh(nr_neigh);

				s->count--;

				switch (i) {
				case 0:
					s->routes[0] = s->routes[1];
					fallthrough;
				case 1:
					s->routes[1] = s->routes[2];
					break;
				case 2:
					break;
				}
				break;

			default:
				s->routes[i].obs_count--;
				break;

			}
		}

		if (s->count <= 0)
			nr_remove_node_locked(s);
		nr_node_unlock(s);
	}
	spin_unlock_bh(&nr_node_list_lock);

	return 0;
}

/*
 * A device has been removed. Remove its routes and neighbours.
 */
void nr_rt_device_down(struct net_device *dev)
{
	struct nr_neigh *s;
	struct hlist_node *nodet, *node2t;
	struct nr_node *t;
	int i;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		if (s->dev == dev) {
			spin_lock_bh(&nr_node_list_lock);
			nr_node_for_each_safe(t, node2t, &nr_node_list) {
				nr_node_lock(t);
				for (i = 0; i < t->count; i++) {
					if (t->routes[i].neighbour == s) {
						t->count--;

						switch (i) {
						case 0:
							t->routes[0] = t->routes[1];
							fallthrough;
						case 1:
							t->routes[1] = t->routes[2];
							break;
						case 2:
							break;
						}
					}
				}

				if (t->count <= 0)
					nr_remove_node_locked(t);
				nr_node_unlock(t);
			}
			spin_unlock_bh(&nr_node_list_lock);

			nr_remove_neigh_locked(s);
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);
}

/*
 * Check that the device given is a valid AX.25 interface that is "up".
 * Or a valid ethernet interface with an AX.25 callsign binding.
 */
static struct net_device *nr_ax25_dev_get(char *devname)
{
	struct net_device *dev;

	if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
		return NULL;

	if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
		return dev;

	dev_put(dev);
	return NULL;
}

/*
 * Find the first active NET/ROM device, usually "nr0".
 */
struct net_device *nr_dev_first(void)
{
	struct net_device *dev, *first = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
			if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
				first = dev;
	}
	if (first)
		dev_hold(first);
	rcu_read_unlock();

	return first;
}

/*
 * Find the NET/ROM device for the given callsign.
 */
struct net_device *nr_dev_get(ax25_address *addr)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
		    ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
			dev_hold(dev);
			goto out;
		}
	}
	dev = NULL;
out:
	rcu_read_unlock();
	return dev;
}

static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
	ax25_address *digipeaters)
{
	int i;

	if (ndigis == 0)
		return NULL;

	for (i = 0; i < ndigis; i++) {
		digi->calls[i] = digipeaters[i];
		digi->repeated[i] = 0;
	}

	digi->ndigi = ndigis;
	digi->lastrepeat = -1;

	return digi;
}

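/*
 * Illustrative userspace sketch (not part of this file) of exercising the
 * SIOCADDRT handler below: nr.callsign is the destination node, nr.neighbour
 * the next-hop neighbour heard on the AX.25 port named in nr.device.  It
 * assumes the uapi <linux/netrom.h> definition of struct nr_route_struct and
 * libax25's ax25_aton_entry() for packing callsigns; "ax0" and the NOCALL
 * callsigns are placeholders.
 *
 *	int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);
 *	struct nr_route_struct nr = { .type = NETROM_NODE };
 *
 *	strcpy(nr.device, "ax0");
 *	strcpy(nr.mnemonic, "#NODE");
 *	ax25_aton_entry("NOCALL-2", nr.callsign.ax25_call);
 *	ax25_aton_entry("NOCALL-1", nr.neighbour.ax25_call);
 *	nr.quality = 192;
 *	nr.obs_count = 6;
 *	nr.ndigis = 0;
 *	ioctl(s, SIOCADDRT, &nr);
 */
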
/*
 * Handle the ioctls that control the routing functions.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}

			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * A level 2 link has timed out, therefore it appears to be a poor link,
 * so don't use that neighbour until it is reset.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node *nr_node = NULL;

	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

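/*
 * The NET/ROM network header as handled here carries the source callsign
 * in bytes 0-6, the destination callsign in bytes 7-13 and the
 * time-to-live in byte 14; the TTL is decremented on each hop and frames
 * arriving with a TTL of 1 are not forwarded.
 */
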
/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;


	nr_src = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
				  ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* It's for me */
		if (ax25 == NULL)			/* It's from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb; we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb=skbn;
	skb->data[14]--;

	dptr = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

static void *nr_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(&nr_node_list_lock)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
	__releases(&nr_node_list_lock)
{
	spin_unlock_bh(&nr_node_list_lock);
}

static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			   ax2asc(buf, &nr_node->callsign),
			   (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			   nr_node->which + 1,
			   nr_node->count);

"*" : nr_node->mnemonic, 879 nr_node->which + 1, 880 nr_node->count); 881 882 for (i = 0; i < nr_node->count; i++) { 883 seq_printf(seq, " %3d %d %05d", 884 nr_node->routes[i].quality, 885 nr_node->routes[i].obs_count, 886 nr_node->routes[i].neighbour->number); 887 } 888 nr_node_unlock(nr_node); 889 890 seq_puts(seq, "\n"); 891 } 892 return 0; 893 } 894 895 const struct seq_operations nr_node_seqops = { 896 .start = nr_node_start, 897 .next = nr_node_next, 898 .stop = nr_node_stop, 899 .show = nr_node_show, 900 }; 901 902 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) 903 __acquires(&nr_neigh_list_lock) 904 { 905 spin_lock_bh(&nr_neigh_list_lock); 906 return seq_hlist_start_head(&nr_neigh_list, *pos); 907 } 908 909 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) 910 { 911 return seq_hlist_next(v, &nr_neigh_list, pos); 912 } 913 914 static void nr_neigh_stop(struct seq_file *seq, void *v) 915 __releases(&nr_neigh_list_lock) 916 { 917 spin_unlock_bh(&nr_neigh_list_lock); 918 } 919 920 static int nr_neigh_show(struct seq_file *seq, void *v) 921 { 922 char buf[11]; 923 int i; 924 925 if (v == SEQ_START_TOKEN) 926 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); 927 else { 928 struct nr_neigh *nr_neigh; 929 930 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node); 931 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 932 nr_neigh->number, 933 ax2asc(buf, &nr_neigh->callsign), 934 nr_neigh->dev ? nr_neigh->dev->name : "???", 935 nr_neigh->quality, 936 nr_neigh->locked, 937 nr_neigh->count, 938 nr_neigh->failed); 939 940 if (nr_neigh->digipeat != NULL) { 941 for (i = 0; i < nr_neigh->digipeat->ndigi; i++) 942 seq_printf(seq, " %s", 943 ax2asc(buf, &nr_neigh->digipeat->calls[i])); 944 } 945 946 seq_puts(seq, "\n"); 947 } 948 return 0; 949 } 950 951 const struct seq_operations nr_neigh_seqops = { 952 .start = nr_neigh_start, 953 .next = nr_neigh_next, 954 .stop = nr_neigh_stop, 955 .show = nr_neigh_show, 956 }; 957 #endif 958 959 /* 960 * Free all memory associated with the nodes and routes lists. 961 */ 962 void nr_rt_free(void) 963 { 964 struct nr_neigh *s = NULL; 965 struct nr_node *t = NULL; 966 struct hlist_node *nodet; 967 968 spin_lock_bh(&nr_neigh_list_lock); 969 spin_lock_bh(&nr_node_list_lock); 970 nr_node_for_each_safe(t, nodet, &nr_node_list) { 971 nr_node_lock(t); 972 nr_remove_node_locked(t); 973 nr_node_unlock(t); 974 } 975 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { 976 while(s->count) { 977 s->count--; 978 nr_neigh_put(s); 979 } 980 nr_remove_neigh_locked(s); 981 } 982 spin_unlock_bh(&nr_node_list_lock); 983 spin_unlock_bh(&nr_neigh_list_lock); 984 } 985