// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	guard(mutex)(&serio->drv_mutex);

	return drv->connect(serio, drv);
}

static int serio_reconnect_driver(struct serio *serio)
{
	guard(mutex)(&serio->drv_mutex);

	if (serio->drv && serio->drv->reconnect)
		return serio->drv->reconnect(serio);

	return -1;
}

static void serio_disconnect_driver(struct serio *serio)
{
	guard(mutex)(&serio->drv_mutex);

	if (serio->drv)
		serio->drv->disconnect(serio);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
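 *
 * Events are queued on serio_event_list, possibly from atomic context,
 * and processed by serio_handle_event() from a workqueue item with
 * serio_mutex held.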
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;

	guard(spinlock_irqsave)(&serio_event_lock);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	guard(mutex)(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	struct serio_event *event;

	guard(spinlock_irqsave)(&serio_event_lock);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different
	 * type we need to add this event and should not look further
	 * because we need to preserve the sequence of distinct events.
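	 * For example, a SERIO_RECONNECT_PORT event queued right after a
	 * SERIO_RESCAN_PORT event for the same port must not be folded
	 * into the earlier rescan.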
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				return 0;
			break;
		}
	}

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		return -ENOMEM;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		return -EINVAL;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

	return 0;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent)
				return serio;
		}
	}

	return NULL;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	scoped_cond_guard(mutex_intr, return -EINTR, &serio_mutex) {
		if (!strncmp(buf, "none", count)) {
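			/* detach the port from its current driver */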
			serio_disconnect_port(serio);
		} else if (!strncmp(buf, "reconnect", count)) {
			serio_reconnect_subtree(serio);
		} else if (!strncmp(buf, "rescan", count)) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
		} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
			serio_disconnect_port(serio);
			error = serio_bind_driver(serio, to_serio_driver(drv));
			serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
			if (error)
				return error;
		} else {
			return -EINVAL;
		}
	}

	return count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
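 * Assigns the port its unique "serioN" name and initializes the embedded
 * device structure; the port is only added to the device hierarchy later,
 * in serio_add_port().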
465 */ 466 static void serio_init_port(struct serio *serio) 467 { 468 static atomic_t serio_no = ATOMIC_INIT(-1); 469 470 __module_get(THIS_MODULE); 471 472 INIT_LIST_HEAD(&serio->node); 473 INIT_LIST_HEAD(&serio->child_node); 474 INIT_LIST_HEAD(&serio->children); 475 spin_lock_init(&serio->lock); 476 mutex_init(&serio->drv_mutex); 477 device_initialize(&serio->dev); 478 dev_set_name(&serio->dev, "serio%lu", 479 (unsigned long)atomic_inc_return(&serio_no)); 480 serio->dev.bus = &serio_bus; 481 serio->dev.release = serio_release_port; 482 serio->dev.groups = serio_device_attr_groups; 483 if (serio->parent) { 484 serio->dev.parent = &serio->parent->dev; 485 serio->depth = serio->parent->depth + 1; 486 } else 487 serio->depth = 0; 488 lockdep_set_subclass(&serio->lock, serio->depth); 489 } 490 491 /* 492 * Complete serio port registration. 493 * Driver core will attempt to find appropriate driver for the port. 494 */ 495 static void serio_add_port(struct serio *serio) 496 { 497 struct serio *parent = serio->parent; 498 int error; 499 500 if (parent) { 501 guard(serio_pause_rx)(parent); 502 503 list_add_tail(&serio->child_node, &parent->children); 504 } 505 506 list_add_tail(&serio->node, &serio_list); 507 508 if (serio->start) 509 serio->start(serio); 510 511 error = device_add(&serio->dev); 512 if (error) 513 dev_err(&serio->dev, 514 "device_add() failed for %s (%s), error: %d\n", 515 serio->phys, serio->name, error); 516 } 517 518 /* 519 * serio_destroy_port() completes unregistration process and removes 520 * port from the system 521 */ 522 static void serio_destroy_port(struct serio *serio) 523 { 524 struct serio *child; 525 526 while ((child = serio_get_pending_child(serio)) != NULL) { 527 serio_remove_pending_events(child); 528 put_device(&child->dev); 529 } 530 531 if (serio->stop) 532 serio->stop(serio); 533 534 if (serio->parent) { 535 guard(serio_pause_rx)(serio->parent); 536 537 list_del_init(&serio->child_node); 538 serio->parent = NULL; 539 } 540 541 if (device_is_registered(&serio->dev)) 542 device_del(&serio->dev); 543 544 list_del_init(&serio->node); 545 serio_remove_pending_events(serio); 546 put_device(&serio->dev); 547 } 548 549 /* 550 * Reconnect serio port (re-initialize attached device). 551 * If reconnect fails (old device is no longer attached or 552 * there was no device to begin with) we do full rescan in 553 * hope of finding a driver for the port. 554 */ 555 static int serio_reconnect_port(struct serio *serio) 556 { 557 int error = serio_reconnect_driver(serio); 558 559 if (error) { 560 serio_disconnect_port(serio); 561 serio_find_driver(serio); 562 } 563 564 return error; 565 } 566 567 /* 568 * Reconnect serio port and all its children (re-initialize attached 569 * devices). 570 */ 571 static void serio_reconnect_subtree(struct serio *root) 572 { 573 struct serio *s = root; 574 int error; 575 576 do { 577 error = serio_reconnect_port(s); 578 if (!error) { 579 /* 580 * Reconnect was successful, move on to do the 581 * first child. 582 */ 583 if (!list_empty(&s->children)) { 584 s = list_first_entry(&s->children, 585 struct serio, child_node); 586 continue; 587 } 588 } 589 590 /* 591 * Either it was a leaf node or reconnect failed and it 592 * became a leaf node. Continue reconnecting starting with 593 * the next sibling of the parent node. 
594 */ 595 while (s != root) { 596 struct serio *parent = s->parent; 597 598 if (!list_is_last(&s->child_node, &parent->children)) { 599 s = list_entry(s->child_node.next, 600 struct serio, child_node); 601 break; 602 } 603 604 s = parent; 605 } 606 } while (s != root); 607 } 608 609 /* 610 * serio_disconnect_port() unbinds a port from its driver. As a side effect 611 * all children ports are unbound and destroyed. 612 */ 613 static void serio_disconnect_port(struct serio *serio) 614 { 615 struct serio *s = serio; 616 617 /* 618 * Children ports should be disconnected and destroyed 619 * first; we travel the tree in depth-first order. 620 */ 621 while (!list_empty(&serio->children)) { 622 623 /* Locate a leaf */ 624 while (!list_empty(&s->children)) 625 s = list_first_entry(&s->children, 626 struct serio, child_node); 627 628 /* 629 * Prune this leaf node unless it is the one we 630 * started with. 631 */ 632 if (s != serio) { 633 struct serio *parent = s->parent; 634 635 device_release_driver(&s->dev); 636 serio_destroy_port(s); 637 638 s = parent; 639 } 640 } 641 642 /* 643 * OK, no children left, now disconnect this port. 644 */ 645 device_release_driver(&serio->dev); 646 } 647 648 void serio_rescan(struct serio *serio) 649 { 650 serio_queue_event(serio, NULL, SERIO_RESCAN_PORT); 651 } 652 EXPORT_SYMBOL(serio_rescan); 653 654 void serio_reconnect(struct serio *serio) 655 { 656 serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE); 657 } 658 EXPORT_SYMBOL(serio_reconnect); 659 660 /* 661 * Submits register request to kseriod for subsequent execution. 662 * Note that port registration is always asynchronous. 663 */ 664 void __serio_register_port(struct serio *serio, struct module *owner) 665 { 666 serio_init_port(serio); 667 serio_queue_event(serio, owner, SERIO_REGISTER_PORT); 668 } 669 EXPORT_SYMBOL(__serio_register_port); 670 671 /* 672 * Synchronously unregisters serio port. 673 */ 674 void serio_unregister_port(struct serio *serio) 675 { 676 guard(mutex)(&serio_mutex); 677 678 serio_disconnect_port(serio); 679 serio_destroy_port(serio); 680 } 681 EXPORT_SYMBOL(serio_unregister_port); 682 683 /* 684 * Safely unregisters children ports if they are present. 685 */ 686 void serio_unregister_child_port(struct serio *serio) 687 { 688 struct serio *s, *next; 689 690 guard(mutex)(&serio_mutex); 691 692 list_for_each_entry_safe(s, next, &serio->children, child_node) { 693 serio_disconnect_port(s); 694 serio_destroy_port(s); 695 } 696 } 697 EXPORT_SYMBOL(serio_unregister_child_port); 698 699 700 /* 701 * Serio driver operations 702 */ 703 704 static ssize_t description_show(struct device_driver *drv, char *buf) 705 { 706 struct serio_driver *driver = to_serio_driver(drv); 707 return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)"); 708 } 709 static DRIVER_ATTR_RO(description); 710 711 static ssize_t bind_mode_show(struct device_driver *drv, char *buf) 712 { 713 struct serio_driver *serio_drv = to_serio_driver(drv); 714 return sprintf(buf, "%s\n", serio_drv->manual_bind ? 
"manual" : "auto"); 715 } 716 717 static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count) 718 { 719 struct serio_driver *serio_drv = to_serio_driver(drv); 720 int retval; 721 722 retval = count; 723 if (!strncmp(buf, "manual", count)) { 724 serio_drv->manual_bind = true; 725 } else if (!strncmp(buf, "auto", count)) { 726 serio_drv->manual_bind = false; 727 } else { 728 retval = -EINVAL; 729 } 730 731 return retval; 732 } 733 static DRIVER_ATTR_RW(bind_mode); 734 735 static struct attribute *serio_driver_attrs[] = { 736 &driver_attr_description.attr, 737 &driver_attr_bind_mode.attr, 738 NULL, 739 }; 740 ATTRIBUTE_GROUPS(serio_driver); 741 742 static int serio_driver_probe(struct device *dev) 743 { 744 struct serio *serio = to_serio_port(dev); 745 struct serio_driver *drv = to_serio_driver(dev->driver); 746 747 return serio_connect_driver(serio, drv); 748 } 749 750 static void serio_driver_remove(struct device *dev) 751 { 752 struct serio *serio = to_serio_port(dev); 753 754 serio_disconnect_driver(serio); 755 } 756 757 static void serio_cleanup(struct serio *serio) 758 { 759 guard(mutex)(&serio->drv_mutex); 760 761 if (serio->drv && serio->drv->cleanup) 762 serio->drv->cleanup(serio); 763 } 764 765 static void serio_shutdown(struct device *dev) 766 { 767 struct serio *serio = to_serio_port(dev); 768 769 serio_cleanup(serio); 770 } 771 772 static void serio_attach_driver(struct serio_driver *drv) 773 { 774 int error; 775 776 error = driver_attach(&drv->driver); 777 if (error) 778 pr_warn("driver_attach() failed for %s with error %d\n", 779 drv->driver.name, error); 780 } 781 782 int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name) 783 { 784 bool manual_bind = drv->manual_bind; 785 int error; 786 787 drv->driver.bus = &serio_bus; 788 drv->driver.owner = owner; 789 drv->driver.mod_name = mod_name; 790 791 /* 792 * Temporarily disable automatic binding because probing 793 * takes long time and we are better off doing it in kseriod 794 */ 795 drv->manual_bind = true; 796 797 error = driver_register(&drv->driver); 798 if (error) { 799 pr_err("driver_register() failed for %s, error: %d\n", 800 drv->driver.name, error); 801 return error; 802 } 803 804 /* 805 * Restore original bind mode and let kseriod bind the 806 * driver to free ports 807 */ 808 if (!manual_bind) { 809 drv->manual_bind = false; 810 error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER); 811 if (error) { 812 driver_unregister(&drv->driver); 813 return error; 814 } 815 } 816 817 return 0; 818 } 819 EXPORT_SYMBOL(__serio_register_driver); 820 821 void serio_unregister_driver(struct serio_driver *drv) 822 { 823 struct serio *serio; 824 825 guard(mutex)(&serio_mutex); 826 827 drv->manual_bind = true; /* so serio_find_driver ignores it */ 828 serio_remove_pending_events(drv); 829 830 start_over: 831 list_for_each_entry(serio, &serio_list, node) { 832 if (serio->drv == drv) { 833 serio_disconnect_port(serio); 834 serio_find_driver(serio); 835 /* we could've deleted some ports, restart */ 836 goto start_over; 837 } 838 } 839 840 driver_unregister(&drv->driver); 841 } 842 EXPORT_SYMBOL(serio_unregister_driver); 843 844 static void serio_set_drv(struct serio *serio, struct serio_driver *drv) 845 { 846 guard(serio_pause_rx)(serio); 847 848 serio->drv = drv; 849 } 850 851 static int serio_bus_match(struct device *dev, const struct device_driver *drv) 852 { 853 struct serio *serio = to_serio_port(dev); 854 const struct serio_driver *serio_drv = 
	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	scoped_guard(mutex, &serio->drv_mutex) {
		if (serio->drv && serio->drv->fast_reconnect) {
			error = serio->drv->fast_reconnect(serio);
			if (error && error != -ENOENT)
				dev_warn(dev, "fast reconnect failed with error %d\n",
					 error);
		}
	}

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	guard(spinlock_irqsave)(&serio->lock);

	if (likely(serio->drv))
		return serio->drv->interrupt(serio, data, dfl);

	if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
EXPORT_SYMBOL(serio_interrupt);

const struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
pr_err("Failed to register serio bus, error: %d\n", error); 998 return error; 999 } 1000 1001 return 0; 1002 } 1003 1004 static void __exit serio_exit(void) 1005 { 1006 bus_unregister(&serio_bus); 1007 1008 /* 1009 * There should not be any outstanding events but work may 1010 * still be scheduled so simply cancel it. 1011 */ 1012 cancel_work_sync(&serio_event_work); 1013 } 1014 1015 subsys_initcall(serio_init); 1016 module_exit(serio_exit); 1017