/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(serio_interrupt);
EXPORT_SYMBOL(__serio_register_port);
EXPORT_SYMBOL(serio_unregister_port);
EXPORT_SYMBOL(serio_unregister_child_port);
EXPORT_SYMBOL(__serio_register_driver);
EXPORT_SYMBOL(serio_unregister_driver);
EXPORT_SYMBOL(serio_open);
EXPORT_SYMBOL(serio_close);
EXPORT_SYMBOL(serio_rescan);
EXPORT_SYMBOL(serio_reconnect);

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static struct bus_type serio_bus;

static void serio_add_port(struct serio *serio);
static void serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

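/*
 * Helpers that invoke driver callbacks; serio->drv_mutex serializes a
 * port's connect(), reconnect() and disconnect() calls so a driver
 * cannot be attached and detached at the same time.
 */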
static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

/*
 * Basic serio -> driver core mappings
 */

static void serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	down_write(&serio_bus.subsys.rwsem);

	if (serio_match_port(drv->id_table, serio)) {
		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			goto out;
		}
		error = device_bind_driver(&serio->dev);
		if (error) {
			printk(KERN_WARNING
				"serio: device_bind_driver() failed "
				"for %s (%s) and %s, error: %d\n",
				serio->phys, serio->name,
				drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			goto out;
		}
	}
out:
	up_write(&serio_bus.subsys.rwsem);
}

static void serio_release_driver(struct serio *serio)
{
	down_write(&serio_bus.subsys.rwsem);
	device_release_driver(&serio->dev);
	up_write(&serio_bus.subsys.rwsem);
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	down_write(&serio_bus.subsys.rwsem);
	error = device_attach(&serio->dev);
	if (error < 0)
		printk(KERN_WARNING
			"serio: device_attach() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
	up_write(&serio_bus.subsys.rwsem);
}


/*
 * Serio event processing.
 */
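/*
 * Rescan, reconnect and registration requests are queued as events and
 * handled later by the kseriod thread, so that potentially slow
 * (re)probing does not run in the caller's context.
 */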

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);
static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
static struct task_struct *serio_task;

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different
	 * type we need to add this event and should not look further
	 * because we need to preserve the sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		printk(KERN_ERR
			"serio: Not enough memory to queue event %d\n",
			event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		printk(KERN_WARNING
			"serio: Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	wake_up(&serio_wait);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(struct serio_event *event)
{
	struct list_head *node, *next;
	struct serio_event *e;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_safe(node, next, &serio_event_list) {
		e = list_entry(node, struct serio_event, node);
		if (event->object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (event->type != e->type)
				break;

			list_del_init(node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}


static struct serio_event *serio_get_event(void)
{
	struct serio_event *event;
	struct list_head *node;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (list_empty(&serio_event_list)) {
		spin_unlock_irqrestore(&serio_event_lock, flags);
		return NULL;
	}

	node = serio_event_list.next;
	event = list_entry(node, struct serio_event, node);
	list_del_init(node);

	spin_unlock_irqrestore(&serio_event_lock, flags);

	return event;
}

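/*
 * Take the next event off the queue and process it.
 */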
static void serio_handle_event(void)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	/*
	 * Note that we handle only one event here to give swsusp
	 * a chance to freeze the kseriod thread. Serio events should
	 * be pretty rare so we are not concerned about taking a
	 * performance hit.
	 */
	if ((event = serio_get_event())) {

		switch (event->type) {
			case SERIO_REGISTER_PORT:
				serio_add_port(event->object);
				break;

			case SERIO_RECONNECT_PORT:
				serio_reconnect_port(event->object);
				break;

			case SERIO_RESCAN_PORT:
				serio_disconnect_port(event->object);
				serio_find_driver(event->object);
				break;

			case SERIO_ATTACH_DRIVER:
				serio_attach_driver(event->object);
				break;

			default:
				break;
		}

		serio_remove_duplicate_events(event);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

/*
 * Remove all events that have been submitted for a given serio port.
 */
static void serio_remove_pending_events(struct serio *serio)
{
	struct list_head *node, *next;
	struct serio_event *event;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_safe(node, next, &serio_event_list) {
		event = list_entry(node, struct serio_event, node);
		if (event->object == serio) {
			list_del_init(node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Destroy a child serio port (if any) that has not been fully registered yet.
 *
 * Note that we rely on the fact that a port can have only one child and
 * therefore only one child registration request can be pending. Additionally,
 * children are registered by the driver's connect() handler so there can't
 * be a grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

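/*
 * kseriod: processes queued serio events one at a time, sleeping
 * when the event list is empty.
 */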
static int serio_thread(void *nothing)
{
	do {
		serio_handle_event();
		wait_event_interruptible(serio_wait,
			kthread_should_stop() || !list_empty(&serio_event_list));
		try_to_freeze();
	} while (!kthread_should_stop());

	printk(KERN_DEBUG "serio: kseriod exiting\n");
	return 0;
}


/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
			serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL);
static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL);
static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL);
static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

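/*
 * "drvctl" attribute handler: writing "none" unbinds the current driver,
 * "reconnect" re-initializes the attached device, "rescan" forces driver
 * re-probing, and any other string is taken as the name of a driver to
 * bind to this port.
 */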
static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int retval;

	retval = mutex_lock_interruptible(&serio_mutex);
	if (retval)
		return retval;

	retval = count;
	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_port(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		serio_bind_driver(serio, to_serio_driver(drv));
		put_driver(drv);
	} else {
		retval = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return retval;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = 1;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = 0;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static struct device_attribute serio_device_attrs[] = {
	__ATTR(description, S_IRUGO, serio_show_description, NULL),
	__ATTR(modalias, S_IRUGO, serio_show_modalias, NULL),
	__ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver),
	__ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode),
	__ATTR_NULL
};


static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(0);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	snprintf(serio->dev.bus_id, sizeof(serio->dev.bus_id),
		 "serio%ld", (long)atomic_inc_return(&serio_no) - 1);
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	int error;

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		serio->parent->child = serio;
		serio_continue_rx(serio->parent);
	}

	list_add_tail(&serio->node, &serio_list);
	if (serio->start)
		serio->start(serio);
	error = device_add(&serio->dev);
	if (error)
		printk(KERN_ERR
			"serio: device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
	else {
		serio->registered = 1;
		error = sysfs_create_group(&serio->dev.kobj, &serio_id_attr_group);
		if (error)
			printk(KERN_ERR
				"serio: sysfs_create_group() failed for %s (%s), error: %d\n",
				serio->phys, serio->name, error);
	}
}

/*
 * serio_destroy_port() completes the deregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	child = serio_get_pending_child(serio);
	if (child) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		serio->parent->child = NULL;
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (serio->registered) {
		sysfs_remove_group(&serio->dev.kobj, &serio_id_attr_group);
		device_del(&serio->dev);
		serio->registered = 0;
	}

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port and all its children (re-initialize attached devices).
 */
static void serio_reconnect_port(struct serio *serio)
{
	do {
		if (serio_reconnect_driver(serio)) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* Ok, old children are now gone, we are done */
			break;
		}
		serio = serio->child;
	} while (serio);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s, *parent;

	if (serio->child) {
		/*
		 * Child ports should be disconnected and destroyed
		 * first, starting with the leaf one, since we don't
		 * want to use recursion.
		 */
		for (s = serio; s->child; s = s->child)
			/* empty */;

		do {
			parent = s->parent;

			serio_release_driver(s);
			serio_destroy_port(s);
		} while ((s = parent) != serio);
	}

	/*
	 * Ok, no children left, now disconnect this port.
	 */
	serio_release_driver(serio);
}

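/*
 * serio_rescan() and serio_reconnect() only queue the corresponding
 * events; the actual work is performed later by kseriod.
 */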
void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
}

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}

/*
 * Safely unregisters child port if one is present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	if (serio->child) {
		serio_disconnect_port(serio->child);
		serio_destroy_port(serio->child);
	}
	mutex_unlock(&serio_mutex);
}


/*
 * Serio driver operations
 */

static ssize_t serio_driver_show_description(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}

static ssize_t serio_driver_show_bind_mode(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = 1;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = 0;
	} else {
		retval = -EINVAL;
	}

	return retval;
}


static struct driver_attribute serio_driver_attrs[] = {
	__ATTR(description, S_IRUGO, serio_driver_show_description, NULL),
	__ATTR(bind_mode, S_IWUSR | S_IRUGO,
	       serio_driver_show_bind_mode, serio_driver_set_bind_mode),
	__ATTR_NULL
};

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		printk(KERN_WARNING
			"serio: driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	int manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = 1;

	error = driver_register(&drv->driver);
	if (error) {
		printk(KERN_ERR
			"serio: driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore the original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = 0;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);
	drv->manual_bind = 1;	/* so serio_find_driver ignores it */

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#ifdef CONFIG_HOTPLUG

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(envp, num_envp, &i,		\
					buffer, buffer_size, &len,	\
					fmt, val);			\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size)
{
	struct serio *serio;
	int i = 0;
	int len = 0;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
				serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
	envp[i] = NULL;

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#else

static int serio_uevent(struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size)
{
	return -ENODEV;
}

#endif /* CONFIG_HOTPLUG */

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	if (serio_reconnect_driver(serio)) {
		/*
		 * Driver re-probing can take a while, so better let kseriod
		 * deal with it.
		 */
		serio_rescan(serio);
	}

	return 0;
}

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}

irqreturn_t serio_interrupt(struct serio *serio,
		unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && serio->registered) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}

static struct bus_type serio_bus = {
	.name		= "serio",
	.dev_attrs	= serio_device_attrs,
	.drv_attrs	= serio_driver_attrs,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.resume		= serio_resume,
};

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		printk(KERN_ERR "serio: failed to register serio bus, error: %d\n", error);
		return error;
	}

	serio_task = kthread_run(serio_thread, NULL, "kseriod");
	if (IS_ERR(serio_task)) {
		bus_unregister(&serio_bus);
		error = PTR_ERR(serio_task);
		printk(KERN_ERR "serio: Failed to start kseriod, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);
	kthread_stop(serio_task);
}

subsys_initcall(serio_init);
module_exit(serio_exit);