// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	guard(mutex)(&serio->drv_mutex);

	return drv->connect(serio, drv);
}

static int serio_reconnect_driver(struct serio *serio)
{
	guard(mutex)(&serio->drv_mutex);

	if (serio->drv && serio->drv->reconnect)
		return serio->drv->reconnect(serio);

	return -1;
}

static void serio_disconnect_driver(struct serio *serio)
{
	guard(mutex)(&serio->drv_mutex);

	if (serio->drv)
		serio->drv->disconnect(serio);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
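 *
 * Events (port registration, rescan and reconnect requests) are queued on
 * serio_event_list and handled asynchronously by serio_event_work on the
 * system_long_wq workqueue, with serio_mutex held.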
 */

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;

	guard(spinlock_irqsave)(&serio_event_lock);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}
}

static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	guard(mutex)(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	struct serio_event *event;

	guard(spinlock_irqsave)(&serio_event_lock);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If the event is the same we
	 * do not need to add a new one. If the event is of a different
	 * type we need to add this event and should not look further
	 * because we need to preserve the sequence of distinct events.
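	 * (e.g. RESCAN, RECONNECT, RESCAN queued for the same port must
	 * remain three separate events).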
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				return 0;
			break;
		}
	}

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		return -ENOMEM;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		return -EINVAL;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

	return 0;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio;

	guard(spinlock_irqsave)(&serio_event_lock);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent)
				return serio;
		}
	}

	return NULL;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
		       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	scoped_cond_guard(mutex_intr, return -EINTR, &serio_mutex) {
		if (!strncmp(buf, "none", count)) {
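			/* "none": detach whatever driver is currently bound */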
			serio_disconnect_port(serio);
		} else if (!strncmp(buf, "reconnect", count)) {
			serio_reconnect_subtree(serio);
		} else if (!strncmp(buf, "rescan", count)) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
		} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
			serio_disconnect_port(serio);
			error = serio_bind_driver(serio, to_serio_driver(drv));
			serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
			if (error)
				return error;
		} else {
			return -EINVAL;
		}
	}

	return count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
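 * Note that registration completes asynchronously: serio_add_port() runs
 * later from the event workqueue, which is when device_add() is called and
 * a driver may be attached.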
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		guard(serio_pause_rx)(parent);

		list_add_tail(&serio->child_node, &parent->children);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes unregistration process and removes
 * port from the system
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		guard(serio_pause_rx)(serio->parent);

		list_del_init(&serio->child_node);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do full rescan in
 * hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
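		 * We walk back up through ->parent until we find a node
		 * with an unvisited next sibling, or arrive back at the
		 * root.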
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	guard(mutex)(&serio_mutex);

	serio_disconnect_port(serio);
	serio_destroy_port(serio);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports, if any are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	guard(mutex)(&serio_mutex);

	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ?
"manual" : "auto"); 716 } 717 718 static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count) 719 { 720 struct serio_driver *serio_drv = to_serio_driver(drv); 721 int retval; 722 723 retval = count; 724 if (!strncmp(buf, "manual", count)) { 725 serio_drv->manual_bind = true; 726 } else if (!strncmp(buf, "auto", count)) { 727 serio_drv->manual_bind = false; 728 } else { 729 retval = -EINVAL; 730 } 731 732 return retval; 733 } 734 static DRIVER_ATTR_RW(bind_mode); 735 736 static struct attribute *serio_driver_attrs[] = { 737 &driver_attr_description.attr, 738 &driver_attr_bind_mode.attr, 739 NULL, 740 }; 741 ATTRIBUTE_GROUPS(serio_driver); 742 743 static int serio_driver_probe(struct device *dev) 744 { 745 struct serio *serio = to_serio_port(dev); 746 struct serio_driver *drv = to_serio_driver(dev->driver); 747 748 return serio_connect_driver(serio, drv); 749 } 750 751 static void serio_driver_remove(struct device *dev) 752 { 753 struct serio *serio = to_serio_port(dev); 754 755 serio_disconnect_driver(serio); 756 } 757 758 static void serio_cleanup(struct serio *serio) 759 { 760 guard(mutex)(&serio->drv_mutex); 761 762 if (serio->drv && serio->drv->cleanup) 763 serio->drv->cleanup(serio); 764 } 765 766 static void serio_shutdown(struct device *dev) 767 { 768 struct serio *serio = to_serio_port(dev); 769 770 serio_cleanup(serio); 771 } 772 773 static void serio_attach_driver(struct serio_driver *drv) 774 { 775 int error; 776 777 error = driver_attach(&drv->driver); 778 if (error) 779 pr_warn("driver_attach() failed for %s with error %d\n", 780 drv->driver.name, error); 781 } 782 783 int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name) 784 { 785 bool manual_bind = drv->manual_bind; 786 int error; 787 788 drv->driver.bus = &serio_bus; 789 drv->driver.owner = owner; 790 drv->driver.mod_name = mod_name; 791 792 /* 793 * Temporarily disable automatic binding because probing 794 * takes long time and we are better off doing it in kseriod 795 */ 796 drv->manual_bind = true; 797 798 error = driver_register(&drv->driver); 799 if (error) { 800 pr_err("driver_register() failed for %s, error: %d\n", 801 drv->driver.name, error); 802 return error; 803 } 804 805 /* 806 * Restore original bind mode and let kseriod bind the 807 * driver to free ports 808 */ 809 if (!manual_bind) { 810 drv->manual_bind = false; 811 error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER); 812 if (error) { 813 driver_unregister(&drv->driver); 814 return error; 815 } 816 } 817 818 return 0; 819 } 820 EXPORT_SYMBOL(__serio_register_driver); 821 822 void serio_unregister_driver(struct serio_driver *drv) 823 { 824 struct serio *serio; 825 826 guard(mutex)(&serio_mutex); 827 828 drv->manual_bind = true; /* so serio_find_driver ignores it */ 829 serio_remove_pending_events(drv); 830 831 start_over: 832 list_for_each_entry(serio, &serio_list, node) { 833 if (serio->drv == drv) { 834 serio_disconnect_port(serio); 835 serio_find_driver(serio); 836 /* we could've deleted some ports, restart */ 837 goto start_over; 838 } 839 } 840 841 driver_unregister(&drv->driver); 842 } 843 EXPORT_SYMBOL(serio_unregister_driver); 844 845 static void serio_set_drv(struct serio *serio, struct serio_driver *drv) 846 { 847 guard(serio_pause_rx)(serio); 848 849 serio->drv = drv; 850 } 851 852 static int serio_bus_match(struct device *dev, const struct device_driver *drv) 853 { 854 struct serio *serio = to_serio_port(dev); 855 const struct serio_driver *serio_drv = 
		to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)			\
	do {							\
		int err = add_uevent_var(env, fmt, val);	\
		if (err)					\
			return err;				\
	} while (0)

static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
			     serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	scoped_guard(mutex, &serio->drv_mutex) {
		if (serio->drv && serio->drv->fast_reconnect) {
			error = serio->drv->fast_reconnect(serio);
			if (error && error != -ENOENT)
				dev_warn(dev, "fast reconnect failed with error %d\n",
					 error);
		}
	}

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

irqreturn_t serio_interrupt(struct serio *serio,
			    unsigned char data, unsigned int dfl)
{
	guard(spinlock_irqsave)(&serio->lock);

	if (likely(serio->drv))
		return serio->drv->interrupt(serio, data, dfl);

	if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
EXPORT_SYMBOL(serio_interrupt);

const struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
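		/* nothing else has been registered yet, so no rollback is needed */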
pr_err("Failed to register serio bus, error: %d\n", error); 999 return error; 1000 } 1001 1002 return 0; 1003 } 1004 1005 static void __exit serio_exit(void) 1006 { 1007 bus_unregister(&serio_bus); 1008 1009 /* 1010 * There should not be any outstanding events but work may 1011 * still be scheduled so simply cancel it. 1012 */ 1013 cancel_work_sync(&serio_event_work); 1014 } 1015 1016 subsys_initcall(serio_init); 1017 module_exit(serio_exit); 1018