1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * slip.c This module implements the SLIP protocol for kernel-based 4 * devices like TTY. It interfaces between a raw TTY, and the 5 * kernel's INET protocol layers. 6 * 7 * Version: @(#)slip.c 0.8.3 12/24/94 8 * 9 * Authors: Laurence Culhane, <loz@holmes.demon.co.uk> 10 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> 11 * 12 * Fixes: 13 * Alan Cox : Sanity checks and avoid tx overruns. 14 * Has a new sl->mtu field. 15 * Alan Cox : Found cause of overrun. ifconfig sl0 16 * mtu upwards. Driver now spots this 17 * and grows/shrinks its buffers(hack!). 18 * Memory leak if you run out of memory 19 * setting up a slip driver fixed. 20 * Matt Dillon : Printable slip (borrowed from NET2E) 21 * Pauline Middelink : Slip driver fixes. 22 * Alan Cox : Honours the old SL_COMPRESSED flag 23 * Alan Cox : KISS AX.25 and AXUI IP support 24 * Michael Riepe : Automatic CSLIP recognition added 25 * Charles Hedrick : CSLIP header length problem fix. 26 * Alan Cox : Corrected non-IP cases of the above. 27 * Alan Cox : Now uses hardware type as per FvK. 28 * Alan Cox : Default to 192.168.0.0 (RFC 1597) 29 * A.N.Kuznetsov : dev_tint() recursion fix. 30 * Dmitry Gorodchanin : SLIP memory leaks 31 * Dmitry Gorodchanin : Code cleanup. Reduce tty driver 32 * buffering from 4096 to 256 bytes. 33 * Improving SLIP response time. 34 * CONFIG_SLIP_MODE_SLIP6. 35 * ifconfig sl? up & down now works 36 * correctly. 37 * Modularization. 38 * Alan Cox : Oops - fix AX.25 buffer lengths 39 * Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP 40 * statistics. Include CSLIP code only 41 * if it really needed. 42 * Alan Cox : Free slhc buffers in the right place. 43 * Alan Cox : Allow for digipeated IP over AX.25 44 * Matti Aarnio : Dynamic SLIP devices, with ideas taken 45 * from Jim Freeman's <jfree@caldera.com> 46 * dynamic PPP devices. We do NOT kfree() 47 * device entries, just reg./unreg. them 48 * as they are needed. 
We kfree() them 49 * at module cleanup. 50 * With MODULE-loading ``insmod'', user 51 * can issue parameter: slip_maxdev=1024 52 * (Or how much he/she wants.. Default 53 * is 256) 54 * Stanislav Voronyi : Slip line checking, with ideas taken 55 * from multislip BSDI driver which was 56 * written by Igor Chechik, RELCOM Corp. 57 * Only algorithms have been ported to 58 * Linux SLIP driver. 59 * Vitaly E. Lavrov : Sane behaviour on tty hangup. 60 * Alexey Kuznetsov : Cleanup interfaces to tty & netdevice 61 * modules. 62 */ 63 64 #define SL_CHECK_TRANSMIT 65 #include <linux/compat.h> 66 #include <linux/module.h> 67 #include <linux/moduleparam.h> 68 69 #include <linux/uaccess.h> 70 #include <linux/bitops.h> 71 #include <linux/sched/signal.h> 72 #include <linux/string.h> 73 #include <linux/mm.h> 74 #include <linux/interrupt.h> 75 #include <linux/in.h> 76 #include <linux/tty.h> 77 #include <linux/errno.h> 78 #include <linux/netdevice.h> 79 #include <linux/etherdevice.h> 80 #include <linux/skbuff.h> 81 #include <linux/rtnetlink.h> 82 #include <linux/if_arp.h> 83 #include <linux/if_slip.h> 84 #include <linux/delay.h> 85 #include <linux/init.h> 86 #include <linux/slab.h> 87 #include <linux/workqueue.h> 88 #include "slip.h" 89 #ifdef CONFIG_INET 90 #include <linux/ip.h> 91 #include <linux/tcp.h> 92 #include <net/slhc_vj.h> 93 #endif 94 95 #define SLIP_VERSION "0.8.4-NET3.019-NEWTTY" 96 97 static struct net_device **slip_devs; 98 99 static int slip_maxdev = SL_NRUNIT; 100 module_param(slip_maxdev, int, 0); 101 MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices"); 102 103 static int slip_esc(unsigned char *p, unsigned char *d, int len); 104 static void slip_unesc(struct slip *sl, unsigned char c); 105 #ifdef CONFIG_SLIP_MODE_SLIP6 106 static int slip_esc6(unsigned char *p, unsigned char *d, int len); 107 static void slip_unesc6(struct slip *sl, unsigned char c); 108 #endif 109 #ifdef CONFIG_SLIP_SMART 110 static void sl_keepalive(struct timer_list *t); 111 static 
void sl_outfill(struct timer_list *t);
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
#endif

/********************************
*  Buffer administration routines:
*	sl_alloc_bufs()
*	sl_free_bufs()
*	sl_realloc_bufs()
*
* NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
*	sl_realloc_bufs provides strong atomicity and reallocation
*	on actively running device.
*********************************/

/*
   Allocate channel buffers.

   Allocates rx/tx (and, with CSLIP, compression) buffers outside the
   lock, then swaps them into the channel under sl->lock so a running
   device is never left with a half-installed buffer set.  Whatever
   ends up in the local variables after the xchg() calls (either the
   fresh buffers on the error paths, or the old ones on success) is
   freed at err_exit.  Returns 0, -ENOBUFS on allocation failure, or
   -ENODEV if the tty vanished meanwhile.
 */

static int sl_alloc_bufs(struct slip *sl, int mtu)
{
	int err = -ENOBUFS;
	unsigned long len;
	char *rbuff = NULL;
	char *xbuff = NULL;
#ifdef SL_INCLUDE_CSLIP
	char *cbuff = NULL;
	struct slcompress *slcomp = NULL;
#endif

	/*
	 * Allocate the SLIP frame buffers:
	 *
	 * rbuff	Receive buffer.
	 * xbuff	Transmit buffer.
	 * cbuff        Temporary compression buffer.
	 */
	len = mtu * 2;

	/*
	 * allow for arrival of larger UDP packets, even if we say not to
	 * also fixes a bug in which SunOS sends 512-byte packets even with
	 * an MSS of 128
	 */
	if (len < 576 * 2)
		len = 576 * 2;
	rbuff = kmalloc(len + 4, GFP_KERNEL);
	if (rbuff == NULL)
		goto err_exit;
	xbuff = kmalloc(len + 4, GFP_KERNEL);
	if (xbuff == NULL)
		goto err_exit;
#ifdef SL_INCLUDE_CSLIP
	cbuff = kmalloc(len + 4, GFP_KERNEL);
	if (cbuff == NULL)
		goto err_exit;
	slcomp = slhc_init(16, 16);
	if (IS_ERR(slcomp))
		goto err_exit;
#endif
	spin_lock_bh(&sl->lock);
	if (sl->tty == NULL) {
		/* tty was detached while we allocated; bail out */
		spin_unlock_bh(&sl->lock);
		err = -ENODEV;
		goto err_exit;
	}
	sl->mtu	     = mtu;
	sl->buffsize = len;
	sl->rcount   = 0;
	sl->xleft    = 0;
	/* Swap new buffers in; locals now hold the old ones for freeing. */
	rbuff = xchg(&sl->rbuff, rbuff);
	xbuff = xchg(&sl->xbuff, xbuff);
#ifdef SL_INCLUDE_CSLIP
	cbuff = xchg(&sl->cbuff, cbuff);
	slcomp = xchg(&sl->slcomp, slcomp);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
	sl->xdata    = 0;
	sl->xbits    = 0;
#endif
	spin_unlock_bh(&sl->lock);
	err = 0;

	/* Cleanup */
err_exit:
#ifdef SL_INCLUDE_CSLIP
	kfree(cbuff);
	/* NOTE(review): on the IS_ERR path slcomp is an ERR_PTR; presumably
	 * slhc_free() tolerates that — confirm against slhc.c. */
	slhc_free(slcomp);
#endif
	kfree(xbuff);
	kfree(rbuff);
	return err;
}

/* Free a SLIP channel buffers. */
static void sl_free_bufs(struct slip *sl)
{
	/* Free all SLIP frame buffers, detaching each pointer first so
	 * concurrent readers never see a freed buffer. */
	kfree(xchg(&sl->rbuff, NULL));
	kfree(xchg(&sl->xbuff, NULL));
#ifdef SL_INCLUDE_CSLIP
	kfree(xchg(&sl->cbuff, NULL));
	slhc_free(xchg(&sl->slcomp, NULL));
#endif
}

/*
   Reallocate slip channel buffers.

   Called on MTU change while the device may be actively running.
   New buffers are allocated (GFP_ATOMIC) up front; under sl->lock the
   old buffers are swapped out and any in-flight tx remainder (xleft)
   and partially received frame (rcount) are copied into the new
   buffers when they fit, otherwise dropped and accounted in stats.
 */

static int sl_realloc_bufs(struct slip *sl, int mtu)
{
	int err = 0;
	struct net_device *dev = sl->dev;
	unsigned char *xbuff, *rbuff;
#ifdef SL_INCLUDE_CSLIP
	unsigned char *cbuff;
#endif
	int len = mtu * 2;

	/*
	 * allow for arrival of larger UDP packets, even if we say not to
	 * also fixes a bug in which SunOS sends 512-byte packets even with
	 * an MSS of 128
	 */
	if (len < 576 * 2)
		len = 576 * 2;

	xbuff = kmalloc(len + 4, GFP_ATOMIC);
	rbuff = kmalloc(len + 4, GFP_ATOMIC);
#ifdef SL_INCLUDE_CSLIP
	cbuff = kmalloc(len + 4, GFP_ATOMIC);
#endif

#ifdef SL_INCLUDE_CSLIP
	if (xbuff == NULL || rbuff == NULL || cbuff == NULL) {
#else
	if (xbuff == NULL || rbuff == NULL) {
#endif
		/* Growing failed: cancel the MTU change.  A failed shrink
		 * is harmless (old, larger buffers stay in place). */
		if (mtu > sl->mtu) {
			printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
			       dev->name);
			err = -ENOBUFS;
		}
		goto done;
	}
	spin_lock_bh(&sl->lock);

	err = -ENODEV;
	if (sl->tty == NULL)
		goto done_on_bh;

	/* Swap in the new buffers; locals now point at the old ones. */
	xbuff    = xchg(&sl->xbuff, xbuff);
	rbuff    = xchg(&sl->rbuff, rbuff);
#ifdef SL_INCLUDE_CSLIP
	cbuff    = xchg(&sl->cbuff, cbuff);
#endif
	if (sl->xleft) {
		if (sl->xleft <= len) {
			/* xhead still points into the old tx buffer */
			memcpy(sl->xbuff, sl->xhead, sl->xleft);
		} else  {
			sl->xleft = 0;
			dev->stats.tx_dropped++;
		}
	}
	sl->xhead = sl->xbuff;

	if (sl->rcount) {
		if (sl->rcount <= len) {
			/* rbuff (local) is the old rx buffer after xchg */
			memcpy(sl->rbuff, rbuff, sl->rcount);
		} else  {
			sl->rcount = 0;
			dev->stats.rx_over_errors++;
			set_bit(SLF_ERROR, &sl->flags);
		}
	}
	sl->mtu      = mtu;
	dev->mtu      = mtu;
	sl->buffsize = len;
	err = 0;

done_on_bh:
	spin_unlock_bh(&sl->lock);

done:
	kfree(xbuff);
	kfree(rbuff);
#ifdef SL_INCLUDE_CSLIP
	kfree(cbuff);
#endif
	return err;
}


/* Set the "sending" flag.  This must be atomic hence the set_bit. */
static inline void sl_lock(struct slip *sl)
{
	netif_stop_queue(sl->dev);
}


/* Clear the "sending" flag.  This must be atomic, hence the ASM. */
static inline void sl_unlock(struct slip *sl)
{
	netif_wake_queue(sl->dev);
}

/* Send one completely decapsulated IP datagram to the IP layer.
 *
 * With CSLIP compiled in, the first byte of the received frame selects
 * the handling: compressed TCP headers are expanded via slhc_uncompress()
 * (leaving 80 bytes of headroom for the expansion), uncompressed-TCP
 * frames update the slhc state and may auto-enable CSLIP in adaptive
 * mode.  The resulting datagram is copied into a fresh skb and handed
 * to the stack with netif_rx_ni().
 */
static void sl_bump(struct slip *sl)
{
	struct net_device *dev = sl->dev;
	struct sk_buff *skb;
	int count;

	count = sl->rcount;
#ifdef SL_INCLUDE_CSLIP
	if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
		unsigned char c = sl->rbuff[0];
		if (c & SL_TYPE_COMPRESSED_TCP) {
			/* ignore compressed packets when CSLIP is off */
			if (!(sl->mode & SL_MODE_CSLIP)) {
				printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
				return;
			}
			/* make sure we've reserved enough space for uncompress
			   to use */
			if (count + 80 > sl->buffsize) {
				dev->stats.rx_over_errors++;
				return;
			}
			count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
			if (count <= 0)
				return;
		} else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
			if (!(sl->mode & SL_MODE_CSLIP)) {
				/* turn on header compression */
				sl->mode |= SL_MODE_CSLIP;
				sl->mode &= ~SL_MODE_ADAPTIVE;
				printk(KERN_INFO "%s: header compression turned on\n", dev->name);
			}
			sl->rbuff[0] &= 0x4f;
			if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
				return;
		}
	}
#endif  /* SL_INCLUDE_CSLIP */

	dev->stats.rx_bytes += count;

	skb = dev_alloc_skb(count);
	if (skb == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}
	skb->dev = dev;
	skb_put_data(skb, sl->rbuff, count);
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IP);
	netif_rx_ni(skb);
	dev->stats.rx_packets++;
}

/* Encapsulate one IP datagram and stuff into a TTY queue.
 *
 * Optionally compresses the header (CSLIP), SLIP- or SLIP6-escapes the
 * result into sl->xbuff, writes as much as the tty accepts, and records
 * the unwritten remainder in xleft/xhead for slip_transmit() to finish
 * on the next write-wakeup.  Caller holds sl->lock (via sl_xmit).
 */
static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
{
	unsigned char *p;
	int actual, count;

	if (len > sl->mtu) {		/* Sigh, shouldn't occur BUT ... */
		printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
		sl->dev->stats.tx_dropped++;
		sl_unlock(sl);
		return;
	}

	p = icp;
#ifdef SL_INCLUDE_CSLIP
	if (sl->mode & SL_MODE_CSLIP)
		len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
	if (sl->mode & SL_MODE_SLIP6)
		count = slip_esc6(p, sl->xbuff, len);
	else
#endif
		count = slip_esc(p, sl->xbuff, len);

	/* Order of next two lines is *very* important.
	 * When we are sending a little amount of data,
	 * the transfer may be completed inside the ops->write()
	 * routine, because it's running with interrupts enabled.
	 * In this case we *never* got WRITE_WAKEUP event,
	 * if we did not request it before write operation.
	 *       14 Oct 1994  Dmitry Gorodchanin.
	 */
	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
	actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
	netif_trans_update(sl->dev);
#endif
	sl->xleft = count - actual;
	sl->xhead = sl->xbuff + actual;
#ifdef CONFIG_SLIP_SMART
	/* VSV */
	clear_bit(SLF_OUTWAIT, &sl->flags);	/* reset outfill flag */
#endif
}

/* Write out any remaining transmit buffer. Scheduled when tty is writable */
static void slip_transmit(struct work_struct *work)
{
	struct slip *sl = container_of(work, struct slip, tx_work);
	int actual;

	spin_lock_bh(&sl->lock);
	/* First make sure we're connected. */
	if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
		spin_unlock_bh(&sl->lock);
		return;
	}

	if (sl->xleft <= 0) {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		spin_unlock_bh(&sl->lock);
		sl_unlock(sl);
		return;
	}

	/* Push the rest of the escaped frame to the tty. */
	actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
	spin_unlock_bh(&sl->lock);
}

/*
 * Called by the driver when there's room for more data.
 * Schedule the transmit.
 */
static void slip_write_wakeup(struct tty_struct *tty)
{
	struct slip *sl;

	/* disc_data is cleared under RCU in slip_close(); take the read
	 * lock so sl cannot be freed while we schedule the work. */
	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}

/* Netdevice watchdog callback: reset a wedged transmitter after the
 * 20 second timeout configured in sl_init(). */
static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock(&sl->lock);

	if (netif_queue_stopped(dev)) {
		if (!netif_running(dev))
			goto out;

		/* May be we must check transmitter timeout here ?
		 *      14 Oct 1994 Dmitry Gorodchanin.
		 */
#ifdef SL_CHECK_TRANSMIT
		if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
			/* 20 sec timeout not reached */
			goto out;
		}
		printk(KERN_WARNING "%s: transmit timed out, %s?\n",
			dev->name,
			(tty_chars_in_buffer(sl->tty) || sl->xleft) ?
				"bad line quality" : "driver error");
		sl->xleft = 0;
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
		sl_unlock(sl);
#endif
	}
out:
	spin_unlock(&sl->lock);
}


/* Encapsulate an IP datagram and kick it into a TTY queue. */
static netdev_tx_t
sl_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock(&sl->lock);
	if (!netif_running(dev)) {
		spin_unlock(&sl->lock);
		printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	if (sl->tty == NULL) {
		spin_unlock(&sl->lock);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Stop the queue until this frame has fully drained to the tty. */
	sl_lock(sl);
	dev->stats.tx_bytes += skb->len;
	sl_encaps(sl, skb->data, skb->len);
	spin_unlock(&sl->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


/******************************************
 *   Routines looking at netdevice side.
 ******************************************/

/* Netdevice UP -> DOWN routine */

static int
sl_close(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	spin_lock_bh(&sl->lock);
	if (sl->tty)
		/* TTY discipline is running. */
		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
	netif_stop_queue(dev);
	/* Discard any partial rx frame and pending tx remainder. */
	sl->rcount   = 0;
	sl->xleft    = 0;
	spin_unlock_bh(&sl->lock);

	return 0;
}

/* Netdevice DOWN -> UP routine */

static int sl_open(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	if (sl->tty == NULL)
		return -ENODEV;

	/* Clear all flag bits except SLF_INUSE. */
	sl->flags &= (1 << SLF_INUSE);
	netif_start_queue(dev);
	return 0;
}

/* Netdevice change MTU request */

static int sl_change_mtu(struct net_device *dev, int new_mtu)
{
	struct slip *sl = netdev_priv(dev);

	/* sl_realloc_bufs() updates dev->mtu itself on success. */
	return sl_realloc_bufs(sl, new_mtu);
}

/* Netdevice get statistics request */

static void
sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct net_device_stats *devstats = &dev->stats;
#ifdef SL_INCLUDE_CSLIP
	struct slip *sl = netdev_priv(dev);
	struct slcompress *comp = sl->slcomp;
#endif
	stats->rx_packets     = devstats->rx_packets;
	stats->tx_packets     = devstats->tx_packets;
	stats->rx_bytes       = devstats->rx_bytes;
	stats->tx_bytes       = devstats->tx_bytes;
	stats->rx_dropped     = devstats->rx_dropped;
	stats->tx_dropped     = devstats->tx_dropped;
	stats->tx_errors      = devstats->tx_errors;
	stats->rx_errors      = devstats->rx_errors;
	stats->rx_over_errors = devstats->rx_over_errors;

#ifdef SL_INCLUDE_CSLIP
	if (comp) {
		/* Generic compressed statistics */
		stats->rx_compressed   = comp->sls_i_compressed;
		stats->tx_compressed   = comp->sls_o_compressed;

		/* Are we really still needs this? */
		stats->rx_fifo_errors += comp->sls_i_compressed;
		stats->rx_dropped     += comp->sls_i_tossed;
		stats->tx_fifo_errors += comp->sls_o_compressed;
		stats->collisions     += comp->sls_o_misses;
	}
#endif
}

/* Netdevice register callback */

static int sl_init(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	/*
	 *	Finish setting up the DEVICE info.
	 */

	dev->mtu		= sl->mtu;
	dev->type		= ARPHRD_SLIP + sl->mode;
#ifdef SL_CHECK_TRANSMIT
	dev->watchdog_timeo	= 20*HZ;
#endif
	return 0;
}


static void sl_uninit(struct net_device *dev)
{
	struct slip *sl = netdev_priv(dev);

	sl_free_bufs(sl);
}

/* Hook the destructor so we can free slip devices at the right point in time */
static void sl_free_netdev(struct net_device *dev)
{
	/* base_addr doubles as the index into slip_devs (set in sl_alloc) */
	int i = dev->base_addr;

	slip_devs[i] = NULL;
}

static const struct net_device_ops sl_netdev_ops = {
	.ndo_init		= sl_init,
	.ndo_uninit		= sl_uninit,
	.ndo_open		= sl_open,
	.ndo_stop		= sl_close,
	.ndo_start_xmit		= sl_xmit,
	.ndo_get_stats64	= sl_get_stats64,
	.ndo_change_mtu		= sl_change_mtu,
	.ndo_tx_timeout		= sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
	.ndo_siocdevprivate	= sl_siocdevprivate,
#endif
};


static void sl_setup(struct net_device *dev)
{
	dev->netdev_ops		= &sl_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= sl_free_netdev;

	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;

	/* MTU range: 68 - 65534 */
	dev->min_mtu = 68;
	dev->max_mtu = 65534;

	/* New-style flags. */
	dev->flags		= IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
}

/******************************************
  Routines looking at TTY side.
 ******************************************/


/*
 * Handle the 'receiver data ready' interrupt.
 * This function is called by the 'tty_io' module in the kernel when
 * a block of SLIP data has been received, which can now be decapsulated
 * and sent on to some IP layer for further processing. This will not
 * be re-entered while running but other ldisc functions may be called
 * in parallel
 */

static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
		const char *fp, int count)
{
	struct slip *sl = tty->disc_data;

	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
		return;

	/* Read the characters out of the buffer.  A non-zero flag byte in
	 * fp marks a character the tty layer flagged as bad: count it as
	 * an rx error (once per frame via SLF_ERROR) and skip it. */
	while (count--) {
		if (fp && *fp++) {
			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
				sl->dev->stats.rx_errors++;
			cp++;
			continue;
		}
#ifdef CONFIG_SLIP_MODE_SLIP6
		if (sl->mode & SL_MODE_SLIP6)
			slip_unesc6(sl, *cp++);
		else
#endif
			slip_unesc(sl, *cp++);
	}
}

/************************************
 *  slip_open helper routines.
 ************************************/

/* Collect hanged up channels */
static void sl_sync(void)
{
	int i;
	struct net_device *dev;
	struct slip *sl;

	/* Bring down any allocated channel whose tty is gone and which is
	 * not leased; slots are filled from index 0 so a NULL entry marks
	 * the end of the allocated region. */
	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (dev == NULL)
			break;

		sl = netdev_priv(dev);
		if (sl->tty || sl->leased)
			continue;
		if (dev->flags & IFF_UP)
			dev_close(dev);
	}
}


/* Find a free SLIP channel, and link in this `tty' line.
 */
static struct slip *sl_alloc(void)
{
	int i;
	char name[IFNAMSIZ];
	struct net_device *dev = NULL;
	struct slip *sl;

	/* Find the first free slot in slip_devs. */
	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (dev == NULL)
			break;
	}
	/* Sorry, too many, all slots in use */
	if (i >= slip_maxdev)
		return NULL;

	sprintf(name, "sl%d", i);
	dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
	if (!dev)
		return NULL;

	/* Remember our slot index; sl_free_netdev() uses it to clear
	 * slip_devs[] when the device is destroyed. */
	dev->base_addr  = i;
	sl = netdev_priv(dev);

	/* Initialize channel control data */
	sl->magic       = SLIP_MAGIC;
	sl->dev	      	= dev;
	spin_lock_init(&sl->lock);
	INIT_WORK(&sl->tx_work, slip_transmit);
	sl->mode        = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
	/* initialize timer_list struct */
	timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
	timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
	slip_devs[i] = dev;
	return sl;
}

/*
 * Open the high-level part of the SLIP channel.
 * This function is called by the TTY module when the
 * SLIP line discipline is called for.  Because we are
 * sure the tty line exists, we only have to link it to
 * a free SLIP channel...
 *
 * Called in process context serialized from other ldisc calls.
 */

static int slip_open(struct tty_struct *tty)
{
	struct slip *sl;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	/* RTnetlink lock is misused here to serialize concurrent
	   opens of slip channels. There are better ways, but it is
	   the simplest one.
	 */
	rtnl_lock();

	/* Collect hanged up channels. */
	sl_sync();

	sl = tty->disc_data;

	err = -EEXIST;
	/* First make sure we're not already connected. */
	if (sl && sl->magic == SLIP_MAGIC)
		goto err_exit;

	/* OK.  Find a free SLIP channel to use. */
	err = -ENFILE;
	sl = sl_alloc();
	if (sl == NULL)
		goto err_exit;

	sl->tty = tty;
	tty->disc_data = sl;
	sl->pid = current->pid;

	if (!test_bit(SLF_INUSE, &sl->flags)) {
		/* Perform the low-level SLIP initialization. */
		err = sl_alloc_bufs(sl, SL_MTU);
		if (err)
			goto err_free_chan;

		set_bit(SLF_INUSE, &sl->flags);

		err = register_netdevice(sl->dev);
		if (err)
			goto err_free_bufs;
	}

#ifdef CONFIG_SLIP_SMART
	if (sl->keepalive) {
		sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ;
		add_timer(&sl->keepalive_timer);
	}
	if (sl->outfill) {
		sl->outfill_timer.expires = jiffies + sl->outfill * HZ;
		add_timer(&sl->outfill_timer);
	}
#endif

	/* Done.  We have linked the TTY line to a channel. */
	rtnl_unlock();
	tty->receive_room = 65536;	/* We don't flow control */

	/* TTY layer expects 0 on success */
	return 0;

err_free_bufs:
	sl_free_bufs(sl);

err_free_chan:
	/* Unwind the tty<->channel link before destroying the netdev. */
	sl->tty = NULL;
	tty->disc_data = NULL;
	clear_bit(SLF_INUSE, &sl->flags);
	sl_free_netdev(sl->dev);
	/* do not call free_netdev before rtnl_unlock */
	rtnl_unlock();
	free_netdev(sl->dev);
	return err;

err_exit:
	rtnl_unlock();

	/* Count references from TTY module */
	return err;
}

/*
 * Close down a SLIP channel.
 * This means flushing out any pending queues, and then returning. This
 * call is serialized against other ldisc functions.
 *
 * We also use this method fo a hangup event
 */

static void slip_close(struct tty_struct *tty)
{
	struct slip *sl = tty->disc_data;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
		return;

	/* Detach from the tty under the lock, then wait (synchronize_rcu +
	 * flush_work) until no write-wakeup path can still see us. */
	spin_lock_bh(&sl->lock);
	rcu_assign_pointer(tty->disc_data, NULL);
	sl->tty = NULL;
	spin_unlock_bh(&sl->lock);

	synchronize_rcu();
	flush_work(&sl->tx_work);

	/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
	del_timer_sync(&sl->keepalive_timer);
	del_timer_sync(&sl->outfill_timer);
#endif
	/* Flush network side */
	unregister_netdev(sl->dev);
	/* This will complete via sl_free_netdev */
}

static int slip_hangup(struct tty_struct *tty)
{
	slip_close(tty);
	return 0;
}
/************************************************************************
 *			STANDARD SLIP ENCAPSULATION		  	 *
 ************************************************************************/

/* Escape the len bytes at s into d per RFC 1055 framing (END/ESC
 * stuffing, frame delimited by END at both ends); returns the number
 * of bytes written.  Worst case output is 2*len + 2 bytes. */
static int slip_esc(unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;
	unsigned char c;

	/*
	 * Send an initial END character to flush out any
	 * data that may have accumulated in the receiver
	 * due to line noise.
	 */

	*ptr++ = END;

	/*
	 * For each byte in the packet, send the appropriate
	 * character sequence, according to the SLIP protocol.
	 */

	while (len-- > 0) {
		switch (c = *s++) {
		case END:
			*ptr++ = ESC;
			*ptr++ = ESC_END;
			break;
		case ESC:
			*ptr++ = ESC;
			*ptr++ = ESC_ESC;
			break;
		default:
			*ptr++ = c;
			break;
		}
	}
	*ptr++ = END;
	return ptr - d;
}

/* Feed one received byte into the SLIP de-escaping state machine.
 * END completes a frame (delivered via sl_bump() if >2 bytes and no
 * error was flagged); ESC arms SLF_ESCAPE for the next byte. */
static void slip_unesc(struct slip *sl, unsigned char s)
{

	switch (s) {
	case END:
#ifdef CONFIG_SLIP_SMART
		/* drop keeptest bit = VSV */
		if (test_bit(SLF_KEEPTEST, &sl->flags))
			clear_bit(SLF_KEEPTEST, &sl->flags);
#endif

		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
		    (sl->rcount > 2))
			sl_bump(sl);
		clear_bit(SLF_ESCAPE, &sl->flags);
		sl->rcount = 0;
		return;

	case ESC:
		set_bit(SLF_ESCAPE, &sl->flags);
		return;
	case ESC_ESC:
		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
			s = ESC;
		break;
	case ESC_END:
		if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
			s = END;
		break;
	}
	if (!test_bit(SLF_ERROR, &sl->flags)) {
		if (sl->rcount < sl->buffsize) {
			sl->rbuff[sl->rcount++] = s;
			return;
		}
		sl->dev->stats.rx_over_errors++;
		set_bit(SLF_ERROR, &sl->flags);
	}
}


#ifdef CONFIG_SLIP_MODE_SLIP6
/************************************************************************
 *			 6 BIT SLIP ENCAPSULATION			*
 ************************************************************************/

/* Encode len bytes at s into printable-ASCII SLIP6 form at d: 6 bits
 * per output character (0x30 + value), frames delimited by 0x70.
 * Returns the number of bytes written. */
static int slip_esc6(unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;
	unsigned char c;
	int i;
	unsigned short v = 0;
	short bits = 0;

	/*
	 * Send an initial END character to flush out any
	 * data that may have accumulated in the receiver
	 * due to line noise.
	 */

	*ptr++ = 0x70;

	/*
	 * Encode the packet into printable ascii characters
	 */

	for (i = 0; i < len; ++i) {
		v = (v << 8) | s[i];
		bits += 8;
		while (bits >= 6) {
			bits -= 6;
			c = 0x30 + ((v >> bits) & 0x3F);
			*ptr++ = c;
		}
	}
	if (bits) {
		c = 0x30 + ((v << (6 - bits)) & 0x3F);
		*ptr++ = c;
	}
	*ptr++ = 0x70;
	return ptr - d;
}

/* Feed one received byte into the SLIP6 decoder: 0x70 delimits a frame,
 * characters in [0x30, 0x70) each contribute 6 bits which are
 * reassembled into octets in sl->xdata/sl->xbits. */
static void slip_unesc6(struct slip *sl, unsigned char s)
{
	unsigned char c;

	if (s == 0x70) {
#ifdef CONFIG_SLIP_SMART
		/* drop keeptest bit = VSV */
		if (test_bit(SLF_KEEPTEST, &sl->flags))
			clear_bit(SLF_KEEPTEST, &sl->flags);
#endif

		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
		    (sl->rcount > 2))
			sl_bump(sl);
		sl->rcount = 0;
		sl->xbits = 0;
		sl->xdata = 0;
	} else if (s >= 0x30 && s < 0x70) {
		sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
		sl->xbits += 6;
		if (sl->xbits >= 8) {
			sl->xbits -= 8;
			c = (unsigned char)(sl->xdata >> sl->xbits);
			if (!test_bit(SLF_ERROR, &sl->flags)) {
				if (sl->rcount < sl->buffsize) {
					sl->rbuff[sl->rcount++] = c;
					return;
				}
				sl->dev->stats.rx_over_errors++;
				set_bit(SLF_ERROR, &sl->flags);
			}
		}
	}
}
#endif /* CONFIG_SLIP_MODE_SLIP6 */

/* Perform I/O control on an active SLIP channel. */
static int slip_ioctl(struct tty_struct *tty, struct file *file,
					unsigned int cmd, unsigned long arg)
{
	struct slip *sl = tty->disc_data;
	unsigned int tmp;
	int __user *p = (int __user *)arg;

	/* First make sure we're connected. */
	if (!sl || sl->magic != SLIP_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case SIOCGIFNAME:
		tmp = strlen(sl->dev->name) + 1;
		if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
			return -EFAULT;
		return 0;

	case SIOCGIFENCAP:
		if (put_user(sl->mode, p))
			return -EFAULT;
		return 0;

	case SIOCSIFENCAP:
		if (get_user(tmp, p))
			return -EFAULT;
#ifndef SL_INCLUDE_CSLIP
		if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE))
			return -EINVAL;
#else
		if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
		    (SL_MODE_ADAPTIVE | SL_MODE_CSLIP))
			/* return -EINVAL; */
			tmp &= ~SL_MODE_ADAPTIVE;
#endif
#ifndef CONFIG_SLIP_MODE_SLIP6
		if (tmp & SL_MODE_SLIP6)
			return -EINVAL;
#endif
		sl->mode = tmp;
		sl->dev->type = ARPHRD_SLIP + sl->mode;
		return 0;

	case SIOCSIFHWADDR:
		return -EINVAL;

#ifdef CONFIG_SLIP_SMART
	/* VSV changes start here */
	case SIOCSKEEPALIVE:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;

		spin_lock_bh(&sl->lock);
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->keepalive = (u8)tmp;
		if (sl->keepalive != 0) {
			mod_timer(&sl->keepalive_timer,
					jiffies + sl->keepalive * HZ);
			set_bit(SLF_KEEPTEST, &sl->flags);
		} else
			del_timer(&sl->keepalive_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGKEEPALIVE:
		if (put_user(sl->keepalive, p))
			return -EFAULT;
		return 0;

	case SIOCSOUTFILL:
		if (get_user(tmp, p))
			return -EFAULT;
		if (tmp > 255) /* max for unchar */
			return -EINVAL;
		spin_lock_bh(&sl->lock);
		if (!sl->tty) {
			spin_unlock_bh(&sl->lock);
			return -ENODEV;
		}
		sl->outfill = (u8)tmp;
		if (sl->outfill != 0) {
			mod_timer(&sl->outfill_timer,
						jiffies + sl->outfill * HZ);
			set_bit(SLF_OUTWAIT, &sl->flags);
		} else
			del_timer(&sl->outfill_timer);
		spin_unlock_bh(&sl->lock);
		return 0;

	case SIOCGOUTFILL:
		if (put_user(sl->outfill, p))
			return -EFAULT;
		return 0;
	/* VSV changes end */
#endif
	default:
		return tty_mode_ioctl(tty, file, cmd, arg);
	}
}

/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
/* function sl_siocdevprivate called from net/core/dev.c
   to allow get/set outfill/keepalive parameter
   by ifconfig                                 */

static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
			     void __user *data, int cmd)
{
	struct slip *sl = netdev_priv(dev);
	unsigned long *p = (unsigned long *)&rq->ifr_ifru;

	if (sl == NULL)		/* Allocation failed ?? */
		return -ENODEV;

	if (in_compat_syscall())
		return -EOPNOTSUPP;

	spin_lock_bh(&sl->lock);

	if (!sl->tty) {
		spin_unlock_bh(&sl->lock);
		return -ENODEV;
	}

	switch (cmd) {
	case SIOCSKEEPALIVE:
		/* max for unchar */
		if ((unsigned)*p > 255) {
			spin_unlock_bh(&sl->lock);
			return -EINVAL;
		}
		sl->keepalive = (u8)*p;
		if (sl->keepalive != 0) {
			/* NOTE(review): the explicit expires assignment is
			 * redundant — mod_timer() below sets the same
			 * expiry.  Kept for byte-compatibility. */
			sl->keepalive_timer.expires =
						jiffies + sl->keepalive * HZ;
			mod_timer(&sl->keepalive_timer,
						jiffies + sl->keepalive * HZ);
			set_bit(SLF_KEEPTEST, &sl->flags);
		} else
			del_timer(&sl->keepalive_timer);
		break;

	case SIOCGKEEPALIVE:
		*p = sl->keepalive;
		break;

	case SIOCSOUTFILL:
		if ((unsigned)*p > 255) { /* max for unchar */
			spin_unlock_bh(&sl->lock);
			return -EINVAL;
		}
		sl->outfill = (u8)*p;
		if (sl->outfill != 0) {
			mod_timer(&sl->outfill_timer,
						jiffies + sl->outfill * HZ);
			set_bit(SLF_OUTWAIT, &sl->flags);
		} else
			del_timer(&sl->outfill_timer);
		break;

	case SIOCGOUTFILL:
		*p = sl->outfill;
		break;

	case SIOCSLEASE:
		/* Resolve race condition, when ioctl'ing hanged up
		   and opened by another process device.
		 */
		if (sl->tty != current->signal->tty &&
		    sl->pid != current->pid) {
			spin_unlock_bh(&sl->lock);
			return -EPERM;
		}
		sl->leased = 0;
		if (*p)
			sl->leased = 1;
		break;

	case SIOCGLEASE:
		*p = sl->leased;
	}
	spin_unlock_bh(&sl->lock);
	return 0;
}
#endif
/* VSV changes end */

static struct tty_ldisc_ops sl_ldisc = {
	.owner 		= THIS_MODULE,
	.num		= N_SLIP,
	.name 		= "slip",
	.open 		= slip_open,
	.close	 	= slip_close,
	.hangup	 	= slip_hangup,
	.ioctl		= slip_ioctl,
	.receive_buf	= slip_receive_buf,
	.write_wakeup	= slip_write_wakeup,
};

/* Module init: allocate the device-slot table and register the SLIP
 * tty line discipline. */
static int __init slip_init(void)
{
	int status;

	if (slip_maxdev < 4)
		slip_maxdev = 4; /* Sanity */

	printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
#ifdef CONFIG_SLIP_MODE_SLIP6
	       " (6 bit encapsulation enabled)"
#endif
	       ".\n",
	       SLIP_VERSION, slip_maxdev);
#if defined(SL_INCLUDE_CSLIP)
	printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
#endif
#ifdef CONFIG_SLIP_SMART
	printk(KERN_INFO "SLIP linefill/keepalive option.\n");
#endif

	slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *),
			    GFP_KERNEL);
	if (!slip_devs)
		return -ENOMEM;

	/* Fill in our line protocol discipline, and register it */
	status = tty_register_ldisc(&sl_ldisc);
	if (status != 0) {
		printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
		kfree(slip_devs);
	}
	return status;
}

/* Module exit: hang up any ttys still attached (with a 1 second,
 * 100 ms-step grace loop), unregister all devices, then remove the
 * line discipline. */
static void __exit slip_exit(void)
{
	int i;
	struct net_device *dev;
	struct slip *sl;
	unsigned long timeout = jiffies + HZ;
	int busy = 0;

	if (slip_devs == NULL)
		return;

	/* First of all: check for active disciplines and hangup them.
	 */
	do {
		if (busy)
			msleep_interruptible(100);

		busy = 0;
		for (i = 0; i < slip_maxdev; i++) {
			dev = slip_devs[i];
			if (!dev)
				continue;
			sl = netdev_priv(dev);
			spin_lock_bh(&sl->lock);
			if (sl->tty) {
				busy++;
				tty_hangup(sl->tty);
			}
			spin_unlock_bh(&sl->lock);
		}
	} while (busy && time_before(jiffies, timeout));

	/* FIXME: hangup is async so we should wait when doing this second
	   phase */

	for (i = 0; i < slip_maxdev; i++) {
		dev = slip_devs[i];
		if (!dev)
			continue;
		slip_devs[i] = NULL;

		sl = netdev_priv(dev);
		if (sl->tty) {
			printk(KERN_ERR "%s: tty discipline still running\n",
			       dev->name);
		}

		unregister_netdev(dev);
	}

	kfree(slip_devs);
	slip_devs = NULL;

	tty_unregister_ldisc(&sl_ldisc);
}

module_init(slip_init);
module_exit(slip_exit);

#ifdef CONFIG_SLIP_SMART
/*
 * This is start of the code for multislip style line checking
 * added by Stanislav Voronyi. All changes before marked VSV
 */

static void sl_outfill(struct timer_list *t)
{
	struct slip *sl = from_timer(sl, t, outfill_timer);

	spin_lock(&sl->lock);

	if (sl->tty == NULL)
		goto out;

	if (sl->outfill) {
		if (test_bit(SLF_OUTWAIT, &sl->flags)) {
			/* no packets were transmitted, do outfill */
#ifdef CONFIG_SLIP_MODE_SLIP6
			unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
#else
			unsigned char s = END;
#endif
			/* put END into tty queue. Is it right ???
*/ 1398 if (!netif_queue_stopped(sl->dev)) { 1399 /* if device busy no outfill */ 1400 sl->tty->ops->write(sl->tty, &s, 1); 1401 } 1402 } else 1403 set_bit(SLF_OUTWAIT, &sl->flags); 1404 1405 mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ); 1406 } 1407 out: 1408 spin_unlock(&sl->lock); 1409 } 1410 1411 static void sl_keepalive(struct timer_list *t) 1412 { 1413 struct slip *sl = from_timer(sl, t, keepalive_timer); 1414 1415 spin_lock(&sl->lock); 1416 1417 if (sl->tty == NULL) 1418 goto out; 1419 1420 if (sl->keepalive) { 1421 if (test_bit(SLF_KEEPTEST, &sl->flags)) { 1422 /* keepalive still high :(, we must hangup */ 1423 if (sl->outfill) 1424 /* outfill timer must be deleted too */ 1425 (void)del_timer(&sl->outfill_timer); 1426 printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); 1427 /* this must hangup tty & close slip */ 1428 tty_hangup(sl->tty); 1429 /* I think we need not something else */ 1430 goto out; 1431 } else 1432 set_bit(SLF_KEEPTEST, &sl->flags); 1433 1434 mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ); 1435 } 1436 out: 1437 spin_unlock(&sl->lock); 1438 } 1439 1440 #endif 1441 MODULE_LICENSE("GPL"); 1442 MODULE_ALIAS_LDISC(N_SLIP); 1443