// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/termios.h>
#include <linux/wwan.h>

/* Maximum number of minors in use */
#define WWAN_MAX_MINORS (1 << MINORBITS)

static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;

#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)

/* WWAN port flags */
#define WWAN_PORT_TX_OFF 0

/**
 * struct wwan_device - The structure that defines a WWAN device
 *
 * @id: WWAN device unique ID.
 * @dev: Underlying device.
 */
struct wwan_device {
	unsigned int id;
	struct device dev;
};

/**
 * struct wwan_port - The structure that defines a WWAN port
 * @type: Port type
 * @start_count: Port start counter
 * @flags: Store port state and capabilities
 * @ops: Pointer to WWAN port operations
 * @ops_lock: Protect port ops
 * @dev: Underlying device
 * @rxq: Buffer inbound queue
 * @waitqueue: The waitqueue for port fops (read/write/poll)
 * @data_lock: Port specific data access serialization
 * @at_data: AT port specific data
 */
struct wwan_port {
	enum wwan_port_type type;
	unsigned int start_count;
	unsigned long flags;
	const struct wwan_port_ops *ops;
	struct mutex ops_lock; /* Serialize ops + protect against removal */
	struct device dev;
	struct sk_buff_head rxq;
	wait_queue_head_t waitqueue;
	struct mutex data_lock; /* Port specific data access serialization */
	union {
		struct {
			struct ktermios termios;
			int mdmbits;
		} at_data;
	};
};

static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct wwan_device *wwan = to_wwan_dev(dev);

	return sprintf(buf, "%d\n", wwan->id);
}
static DEVICE_ATTR_RO(index);

static struct attribute *wwan_dev_attrs[] = {
	&dev_attr_index.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_dev);

static void wwan_dev_destroy(struct device *dev)
{
	struct wwan_device *wwandev = to_wwan_dev(dev);

	ida_free(&wwan_dev_ids, wwandev->id);
	kfree(wwandev);
}

static const struct device_type wwan_dev_type = {
	.name = "wwan_dev",
	.release = wwan_dev_destroy,
	.groups = wwan_dev_groups,
};

static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
	return (dev->type == &wwan_dev_type && dev->parent == parent);
}

static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}
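/*
 * Editorial note, not part of the original sources: ports registered against
 * the same parent are grouped under a single "wwanN" class device, so a
 * typical modem ends up with a sysfs layout along these lines (illustrative
 * only, exact paths and names depend on the driver and registration order):
 *
 *	/sys/class/wwan/wwan0/index            -> "0" (from index_show() above)
 *	/sys/class/wwan/wwan0/wwan0at0/type    -> "AT"
 *	/sys/class/wwan/wwan0/wwan0mbim0/type  -> "MBIM"
 */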
/* This function allocates and registers a new WWAN device or, if a WWAN
 * device already exists for the given parent, takes a reference and returns
 * it. This function is not exported (for now), it is called indirectly via
 * wwan_create_port().
 */
static struct wwan_device *wwan_create_dev(struct device *parent)
{
	struct wwan_device *wwandev;
	int err, id;

	/* The 'find-alloc-register' operation must be protected against
	 * concurrent execution, a WWAN device is possibly shared between
	 * multiple callers or concurrently unregistered from wwan_remove_dev().
	 */
	mutex_lock(&wwan_register_lock);

	/* If wwandev already exists, return it */
	wwandev = wwan_dev_get_by_parent(parent);
	if (!IS_ERR(wwandev))
		goto done_unlock;

	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
	if (id < 0) {
		wwandev = ERR_PTR(id);
		goto done_unlock;
	}

	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
	if (!wwandev) {
		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

	wwandev->dev.parent = parent;
	wwandev->dev.class = wwan_class;
	wwandev->dev.type = &wwan_dev_type;
	wwandev->id = id;
	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);

	err = device_register(&wwandev->dev);
	if (err) {
		put_device(&wwandev->dev);
		wwandev = ERR_PTR(err);
	}

done_unlock:
	mutex_unlock(&wwan_register_lock);

	return wwandev;
}

static int is_wwan_child(struct device *dev, void *data)
{
	return dev->class == wwan_class;
}

static void wwan_remove_dev(struct wwan_device *wwandev)
{
	int ret;

	/* Prevent concurrent picking from wwan_create_dev */
	mutex_lock(&wwan_register_lock);

	/* WWAN device is created and registered (get+add) along with its first
	 * child port, and subsequent port registrations only grab a reference
	 * (get). The WWAN device must then be unregistered (del+put) along with
	 * its last port, and the reference simply dropped (put) otherwise.
	 */
	ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
	if (!ret)
		device_unregister(&wwandev->dev);
	else
		put_device(&wwandev->dev);

	mutex_unlock(&wwan_register_lock);
}
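/*
 * Editorial sketch, not part of the original file: wwan_create_dev() and
 * wwan_remove_dev() are always called in pairs from port creation/removal,
 * so the WWAN device lifetime simply follows its ports. Assuming a
 * hypothetical driver with a parent device "parent" and port ops
 * "drv_at_ops"/"drv_mbim_ops":
 *
 *	at   = wwan_create_port(parent, WWAN_PORT_AT, &drv_at_ops, drv);
 *	mbim = wwan_create_port(parent, WWAN_PORT_MBIM, &drv_mbim_ops, drv);
 *	// the first call registers "wwanX", the second only takes a reference
 *	wwan_remove_port(mbim);	// drops a reference on "wwanX"
 *	wwan_remove_port(at);	// last port gone: "wwanX" is unregistered
 */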
/* ------- WWAN port management ------- */

static const struct {
	const char * const name;	/* Port type name */
	const char * const devsuf;	/* Port device name suffix */
} wwan_port_types[WWAN_PORT_MAX + 1] = {
	[WWAN_PORT_AT] = {
		.name = "AT",
		.devsuf = "at",
	},
	[WWAN_PORT_MBIM] = {
		.name = "MBIM",
		.devsuf = "mbim",
	},
	[WWAN_PORT_QMI] = {
		.name = "QMI",
		.devsuf = "qmi",
	},
	[WWAN_PORT_QCDM] = {
		.name = "QCDM",
		.devsuf = "qcdm",
	},
	[WWAN_PORT_FIREHOSE] = {
		.name = "FIREHOSE",
		.devsuf = "firehose",
	},
};

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct wwan_port *port = to_wwan_port(dev);

	return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
}
static DEVICE_ATTR_RO(type);

static struct attribute *wwan_port_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_port);

static void wwan_port_destroy(struct device *dev)
{
	struct wwan_port *port = to_wwan_port(dev);

	ida_free(&minors, MINOR(port->dev.devt));
	mutex_destroy(&port->data_lock);
	mutex_destroy(&port->ops_lock);
	kfree(port);
}

static const struct device_type wwan_port_dev_type = {
	.name = "wwan_port",
	.release = wwan_port_destroy,
	.groups = wwan_port_groups,
};

static int wwan_port_minor_match(struct device *dev, const void *minor)
{
	return (dev->type == &wwan_port_dev_type &&
		MINOR(dev->devt) == *(unsigned int *)minor);
}

static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_port(dev);
}
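/*
 * Editorial note, not part of the original file: port character devices are
 * named "wwan<dev-id><type-suffix><index>", the index being the lowest free
 * one for that device/type pair (see __wwan_port_dev_assign_name() below).
 * For example, on a hypothetical device wwan0 exposing two AT ports and one
 * QMI port, the resulting nodes would typically be:
 *
 *	/dev/wwan0at0
 *	/dev/wwan0at1
 *	/dev/wwan0qmi0
 */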
/* Allocate and set unique name based on passed format
 *
 * Name allocation approach is highly inspired by the __dev_alloc_name()
 * function.
 *
 * To avoid name collisions, the caller must prevent the new port device
 * registration as well as concurrent invocation of this function.
 */
static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
	const unsigned int max_ports = PAGE_SIZE * 8;
	struct class_dev_iter iter;
	unsigned long *idmap;
	struct device *dev;
	char buf[0x20];
	int id;

	idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!idmap)
		return -ENOMEM;

	/* Collect ids of same name format ports */
	class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
	while ((dev = class_dev_iter_next(&iter))) {
		if (dev->parent != &wwandev->dev)
			continue;
		if (sscanf(dev_name(dev), fmt, &id) != 1)
			continue;
		if (id < 0 || id >= max_ports)
			continue;
		set_bit(id, idmap);
	}
	class_dev_iter_exit(&iter);

	/* Allocate unique id */
	id = find_first_zero_bit(idmap, max_ports);
	free_page((unsigned long)idmap);

	snprintf(buf, sizeof(buf), fmt, id);	/* Name generation */

	dev = device_find_child_by_name(&wwandev->dev, buf);
	if (dev) {
		put_device(dev);
		return -ENFILE;
	}

	return dev_set_name(&port->dev, buf);
}

struct wwan_port *wwan_create_port(struct device *parent,
				   enum wwan_port_type type,
				   const struct wwan_port_ops *ops,
				   void *drvdata)
{
	struct wwan_device *wwandev;
	struct wwan_port *port;
	int minor, err = -ENOMEM;
	char namefmt[0x20];

	if (type > WWAN_PORT_MAX || !ops)
		return ERR_PTR(-EINVAL);

	/* A port is always a child of a WWAN device, retrieve (allocate or
	 * pick) the WWAN device based on the provided parent device.
	 */
	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* A port is exposed as character device, get a minor */
	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
	if (minor < 0)
		goto error_wwandev_remove;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		ida_free(&minors, minor);
		goto error_wwandev_remove;
	}

	port->type = type;
	port->ops = ops;
	mutex_init(&port->ops_lock);
	skb_queue_head_init(&port->rxq);
	init_waitqueue_head(&port->waitqueue);
	mutex_init(&port->data_lock);

	port->dev.parent = &wwandev->dev;
	port->dev.class = wwan_class;
	port->dev.type = &wwan_port_dev_type;
	port->dev.devt = MKDEV(wwan_major, minor);
	dev_set_drvdata(&port->dev, drvdata);

	/* allocate unique name based on wwan device id, port type and number */
	snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
		 wwan_port_types[port->type].devsuf);

	/* Serialize ports registration */
	mutex_lock(&wwan_register_lock);

	__wwan_port_dev_assign_name(port, namefmt);
	err = device_register(&port->dev);

	mutex_unlock(&wwan_register_lock);

	if (err)
		goto error_put_device;

	return port;

error_put_device:
	put_device(&port->dev);
error_wwandev_remove:
	wwan_remove_dev(wwandev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);
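/*
 * Editorial sketch, not part of the original file: minimal driver-side usage
 * of wwan_create_port(). The "struct my_modem" context, my_modem_send(),
 * "parent" and "modem" belong to a hypothetical driver; only the ops layout
 * and the calls into this file's API are taken from the code above:
 *
 *	static int my_port_start(struct wwan_port *port)
 *	{
 *		return 0;	// enable the HW channel backing this port
 *	}
 *
 *	static void my_port_stop(struct wwan_port *port)
 *	{
 *		// quiesce the HW channel
 *	}
 *
 *	static int my_port_tx(struct wwan_port *port, struct sk_buff *skb)
 *	{
 *		struct my_modem *modem = wwan_port_get_drvdata(port);
 *
 *		return my_modem_send(modem, skb);
 *	}
 *
 *	static const struct wwan_port_ops my_port_ops = {
 *		.start = my_port_start,
 *		.stop = my_port_stop,
 *		.tx = my_port_tx,
 *	};
 *
 *	port = wwan_create_port(parent, WWAN_PORT_AT, &my_port_ops, modem);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 */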
void wwan_remove_port(struct wwan_port *port)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);

	mutex_lock(&port->ops_lock);
	if (port->start_count)
		port->ops->stop(port);
	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
	mutex_unlock(&port->ops_lock);

	wake_up_interruptible(&port->waitqueue);

	skb_queue_purge(&port->rxq);
	dev_set_drvdata(&port->dev, NULL);
	device_unregister(&port->dev);

	/* Release related wwan device */
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);

void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
	skb_queue_tail(&port->rxq, skb);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);

void wwan_port_txon(struct wwan_port *port)
{
	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);

void wwan_port_txoff(struct wwan_port *port)
{
	set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);

void *wwan_port_get_drvdata(struct wwan_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
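/*
 * Editorial sketch, not part of the original file: the helpers above are the
 * driver-facing RX and flow-control hooks. Assuming a hypothetical driver RX
 * completion handler and a "modem->at_port" saved from wwan_create_port(),
 * they would be used along these lines:
 *
 *	// called from the driver's RX completion path
 *	static void my_modem_rx_done(struct my_modem *modem, struct sk_buff *skb)
 *	{
 *		wwan_port_rx(modem->at_port, skb);	// queue + wake readers
 *	}
 *
 *	// called when the driver's TX resources run out / are replenished
 *	wwan_port_txoff(modem->at_port);	// writers block or get -EAGAIN
 *	wwan_port_txon(modem->at_port);		// writers are woken again
 */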
static int wwan_port_op_start(struct wwan_port *port)
{
	int ret = 0;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* If port is already started, don't start again */
	if (!port->start_count)
		ret = port->ops->start(port);

	if (!ret)
		port->start_count++;

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static void wwan_port_op_stop(struct wwan_port *port)
{
	mutex_lock(&port->ops_lock);
	port->start_count--;
	if (!port->start_count) {
		if (port->ops)
			port->ops->stop(port);
		skb_queue_purge(&port->rxq);
	}
	mutex_unlock(&port->ops_lock);
}

static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = port->ops->tx(port, skb);

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static bool is_read_blocked(struct wwan_port *port)
{
	return skb_queue_empty(&port->rxq) && port->ops;
}

static bool is_write_blocked(struct wwan_port *port)
{
	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}

static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
	if (!is_read_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
	if (!is_write_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
	struct wwan_port *port;
	int err = 0;

	port = wwan_port_get_by_minor(iminor(inode));
	if (IS_ERR(port))
		return PTR_ERR(port);

	file->private_data = port;
	stream_open(inode, file);

	err = wwan_port_op_start(port);
	if (err)
		put_device(&port->dev);

	return err;
}

static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
	struct wwan_port *port = filp->private_data;

	wwan_port_op_stop(port);
	put_device(&port->dev);

	return 0;
}

static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	size_t copied;
	int ret;

	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = skb_dequeue(&port->rxq);
	if (!skb)
		return -EIO;

	copied = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, copied)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	skb_pull(skb, copied);

	/* skb is not fully consumed, keep it in the queue */
	if (skb->len)
		skb_queue_head(&port->rxq, skb);
	else
		consume_skb(skb);

	return copied;
}

static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
				    size_t count, loff_t *offp)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	int ret;

	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = alloc_skb(count, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	ret = wwan_port_op_tx(port, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return count;
}

static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
	struct wwan_port *port = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &port->waitqueue, wait);

	if (!is_write_blocked(port))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (!is_read_blocked(port))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!port->ops)
		mask |= EPOLLHUP | EPOLLERR;

	return mask;
}
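/*
 * Editorial sketch, not part of the original file: from userspace the port is
 * a plain character device, so a minimal AT exchange (the device node name is
 * illustrative) would look like:
 *
 *	int fd = open("/dev/wwan0at0", O_RDWR);
 *	write(fd, "AT\r", 3);			// forwarded via ops->tx()
 *	ssize_t n = read(fd, buf, sizeof(buf));	// data queued by wwan_port_rx()
 *	// poll() reports EPOLLHUP | EPOLLERR once the port has been removed
 */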
/* Implements minimalistic stub terminal IOCTLs support */
static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
				    unsigned long arg)
{
	int ret = 0;

	mutex_lock(&port->data_lock);

	switch (cmd) {
	case TCFLSH:
		break;

	case TCGETS:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios)))
			ret = -EFAULT;
		break;

	case TCSETS:
	case TCSETSW:
	case TCSETSF:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios)))
			ret = -EFAULT;
		break;

#ifdef TCGETS2
	case TCGETS2:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios2)))
			ret = -EFAULT;
		break;

	case TCSETS2:
	case TCSETSW2:
	case TCSETSF2:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios2)))
			ret = -EFAULT;
		break;
#endif

	case TIOCMGET:
		ret = put_user(port->at_data.mdmbits, (int __user *)arg);
		break;

	case TIOCMSET:
	case TIOCMBIC:
	case TIOCMBIS: {
		int mdmbits;

		if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
			ret = -EFAULT;
			break;
		}
		if (cmd == TIOCMBIC)
			port->at_data.mdmbits &= ~mdmbits;
		else if (cmd == TIOCMBIS)
			port->at_data.mdmbits |= mdmbits;
		else
			port->at_data.mdmbits = mdmbits;
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&port->data_lock);

	return ret;
}

static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct wwan_port *port = filp->private_data;
	int res;

	if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
		res = wwan_port_fops_at_ioctl(port, cmd, arg);
		if (res != -ENOIOCTLCMD)
			return res;
	}

	switch (cmd) {
	case TIOCINQ: {	/* aka SIOCINQ aka FIONREAD */
		unsigned long flags;
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_irqsave(&port->rxq.lock, flags);
		skb_queue_walk(&port->rxq, skb)
			amount += skb->len;
		spin_unlock_irqrestore(&port->rxq.lock, flags);

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.unlocked_ioctl = wwan_port_fops_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ptr_ioctl,
#endif
	.llseek = noop_llseek,
};

static int __init wwan_init(void)
{
	wwan_class = class_create(THIS_MODULE, "wwan");
	if (IS_ERR(wwan_class))
		return PTR_ERR(wwan_class);

	/* chrdev used for wwan ports */
	wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
				       &wwan_port_fops);
	if (wwan_major < 0) {
		class_destroy(wwan_class);
		return wwan_major;
	}

	return 0;
}

static void __exit wwan_exit(void)
{
	__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
	class_destroy(wwan_class);
}

module_init(wwan_init);
module_exit(wwan_exit);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");