// SPDX-License-Identifier: GPL-2.0-only
/*
 * The input core
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/pm.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
#include "input-core-private.h"
#include "input-poller.h"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");

#define INPUT_MAX_CHAR_DEVICES		1024
#define INPUT_FIRST_DYNAMIC_DEV		256
static DEFINE_IDA(input_ida);

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive, which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

static const unsigned int input_max_code[EV_CNT] = {
	[EV_KEY] = KEY_MAX,
	[EV_REL] = REL_MAX,
	[EV_ABS] = ABS_MAX,
	[EV_MSC] = MSC_MAX,
	[EV_SW] = SW_MAX,
	[EV_LED] = LED_MAX,
	[EV_SND] = SND_MAX,
	[EV_FF] = FF_MAX,
};

static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (test_bit(EV_REP, dev->evbit) &&
	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
	    dev->timer.function) {
		dev->repeat_key = code;
		mod_timer(&dev->timer,
			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
	}
}

static void input_stop_autorepeat(struct input_dev *dev)
{
	timer_delete(&dev->timer);
}
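/*
 * Worked example (illustrative values, not from any particular driver):
 * with fuzz = 8 and old_val = 100, input_defuzz_abs_event() above yields:
 *
 *	value = 102  ->  100			(within fuzz/2: jitter dropped)
 *	value = 106  ->  (3*100 + 106) / 4 = 101	(within fuzz: heavy smoothing)
 *	value = 112  ->  (100 + 112) / 2 = 106	(within 2*fuzz: light smoothing)
 *	value = 130  ->  130			(outside all bands: passed through)
 */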
/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This order is achieved by placing
 * filters at the head of the list of handles attached to the device, and
 * placing regular handles at the tail of the list.
 *
 * This function is called with dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	lockdep_assert_held(&dev->event_lock);

	scoped_guard(rcu) {
		handle = rcu_dereference(dev->grab);
		if (handle) {
			count = handle->handle_events(handle, vals, count);
			break;
		}

		list_for_each_entry_rcu(handle, &dev->h_list, d_node) {
			if (handle->open) {
				count = handle->handle_events(handle, vals,
							      count);
				if (!count)
					break;
			}
		}
	}

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}

#define INPUT_IGNORE_EVENT	0
#define INPUT_PASS_TO_HANDLERS	1
#define INPUT_PASS_TO_DEVICE	2
#define INPUT_SLOT		4
#define INPUT_FLUSH		8
#define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)

static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_new_slot = false;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
		is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
					       dev->absinfo[code].fuzz);
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_new_slot) {
		dev->absinfo[ABS_MT_SLOT].value = mt->slot;
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}

static int input_get_disposition(struct input_dev *dev,
				 unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	/* filter-out events from inhibited devices */
	if (dev->inhibited)
		return INPUT_IGNORE_EVENT;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;
		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}

static void input_event_dispose(struct input_dev *dev, int disposition,
				unsigned int type, unsigned int code, int value)
{
	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monotonic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	int disposition;

	lockdep_assert_held(&dev->event_lock);

	disposition = input_get_disposition(dev, type, code, &value);
	if (disposition != INPUT_IGNORE_EVENT) {
		if (type != EV_SYN)
			add_input_randomness(type, code, value);

		input_event_dispose(dev, disposition, type, code, value);
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after input device was
 * allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used
 * to 'seed' initial state of a switch or initial position of absolute
 * axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		guard(spinlock_irqsave)(&dev->event_lock);
		input_handle_event(dev, type, code, value);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore event if device is
 * "grabbed" and handle injecting event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		guard(spinlock_irqsave)(&dev->event_lock);
		guard(rcu)();

		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
	}
}
EXPORT_SYMBOL(input_inject_event);
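/*
 * Illustration (a minimal sketch, hypothetical foo_* names): drivers normally
 * report events through the input_report_*() helpers that wrap input_event(),
 * e.g. from an interrupt handler of a button driver:
 *
 *	static irqreturn_t foo_button_irq(int irq, void *data)
 *	{
 *		struct foo_button *btn = data;		// hypothetical driver data
 *
 *		input_report_key(btn->input, KEY_POWER, foo_button_pressed(btn));
 *		input_sync(btn->input);			// emits EV_SYN/SYN_REPORT
 *		return IRQ_HANDLED;
 *	}
 */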
/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (dev->absinfo)
		return;

	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
	if (!dev->absinfo) {
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate memory\n", __func__);
		/*
		 * We will handle this allocation failure in
		 * input_register_device() when we refuse to register input
		 * device with ABS bits but without absinfo.
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;
}
EXPORT_SYMBOL(input_set_abs_params);

/**
 * input_copy_abs - Copy absinfo from one input_dev to another
 * @dst: Destination input device to copy the abs settings to
 * @dst_axis: ABS_* value selecting the destination axis
 * @src: Source input device to copy the abs settings from
 * @src_axis: ABS_* value selecting the source axis
 *
 * Set absinfo for the selected destination axis by copying it from
 * the specified source input device's source axis.
 * This is useful to e.g. setup a pen/stylus input-device for combined
 * touchscreen/pen hardware where the pen uses the same coordinates as
 * the touchscreen.
 */
void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
		    const struct input_dev *src, unsigned int src_axis)
{
	/* src must have EV_ABS and src_axis set */
	if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
		      test_bit(src_axis, src->absbit))))
		return;

	/*
	 * input_alloc_absinfo() may have failed for the source. Our caller is
	 * expected to catch this when registering the input devices, which may
	 * happen after the input_copy_abs() call.
	 */
	if (!src->absinfo)
		return;

	input_set_capability(dst, EV_ABS, dst_axis);
	if (!dst->absinfo)
		return;

	dst->absinfo[dst_axis] = src->absinfo[src_axis];
}
EXPORT_SYMBOL(input_copy_abs);
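/*
 * Illustration (a minimal sketch with made-up ranges): a touchscreen driver
 * would typically declare its axes during probe along these lines:
 *
 *	input_set_abs_params(ts->input, ABS_X, 0, 4095, 4, 0);
 *	input_set_abs_params(ts->input, ABS_Y, 0, 4095, 4, 0);
 *	input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
 *
 * The min/max/fuzz/flat values above are arbitrary examples; real drivers use
 * the ranges reported by their hardware.
 */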
/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
		if (dev->grab)
			return -EBUSY;

		rcu_assign_pointer(dev->grab, handle);
	}

	return 0;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_values() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	guard(mutex)(&dev->mutex);
	__input_release_device(handle);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int error;

	scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
		if (dev->going_away)
			return -ENODEV;

		handle->open++;

		if (handle->handler->passive_observer)
			return 0;

		if (dev->users++ || dev->inhibited) {
			/*
			 * Device is already opened and/or inhibited,
			 * so we can exit immediately and report success.
			 */
			return 0;
		}

		if (dev->open) {
			error = dev->open(dev);
			if (error) {
				dev->users--;
				handle->open--;
				/*
				 * Make sure we are not delivering any more
				 * events through this handle.
				 */
				synchronize_rcu();
				return error;
			}
		}

		if (dev->poller)
			input_dev_poller_start(dev->poller);
	}

	return 0;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;

	scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
		if (dev->flush)
			return dev->flush(dev, file);
	}

	return 0;
}
EXPORT_SYMBOL(input_flush_device);
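/*
 * Illustration (a rough sketch of the handler side, hypothetical foo_* names):
 * a handler usually opens the device once it has a registered handle, e.g.:
 *
 *	error = input_register_handle(handle);
 *	if (error)
 *		goto err_free_handle;
 *
 *	error = input_open_device(handle);
 *	if (error)
 *		goto err_unregister_handle;
 *
 * and balances the open with input_close_device() when it disconnects.
 */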
/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	guard(mutex)(&dev->mutex);

	__input_release_device(handle);

	if (!handle->handler->passive_observer) {
		if (!--dev->users && !dev->inhibited) {
			if (dev->poller)
				input_dev_poller_stop(dev->poller);
			if (dev->close)
				dev->close(dev);
		}
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_values()
		 * completed and that no more input events are delivered
		 * through this handle.
		 */
		synchronize_rcu();
	}
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static bool input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	lockdep_assert_held(&dev->event_lock);

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_handle_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}
	}

	return need_sync;
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device().
	 */
	scoped_guard(mutex, &dev->mutex)
		dev->going_away = true;

	guard(spinlock_irq)(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert scancode stored in &struct input_keymap_entry
 * into scalar form understood by legacy keymap handling methods. These
 * methods expect scancodes to be represented as 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);

/*
 * Those routines handle the default case where no [gs]etkeycode() is
 * defined. In this case, an array indexed by the scancode is used.
 */

static unsigned int input_fetch_keycode(struct input_dev *dev,
					unsigned int index)
{
	switch (dev->keycodesize) {
	case 1:
		return ((u8 *)dev->keycode)[index];

	case 2:
		return ((u16 *)dev->keycode)[index];

	default:
		return ((u32 *)dev->keycode)[index];
	}
}

static int input_default_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	unsigned int index;
	int error;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
		index = ke->index;
	else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	ke->keycode = input_fetch_keycode(dev, index);
	ke->index = index;
	ke->len = sizeof(index);
	memcpy(ke->scancode, &index, sizeof(index));

	return 0;
}

static int input_default_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	unsigned int index;
	int error;
	int i;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	if (dev->keycodesize < sizeof(ke->keycode) &&
	    (ke->keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
	case 1: {
		u8 *k = (u8 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	case 2: {
		u16 *k = (u16 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	default: {
		u32 *k = (u32 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	}

	if (*old_keycode <= KEY_MAX) {
		__clear_bit(*old_keycode, dev->keybit);
		for (i = 0; i < dev->keycodemax; i++) {
			if (input_fetch_keycode(dev, i) == *old_keycode) {
				__set_bit(*old_keycode, dev->keybit);
				/* Setting the bit twice is useless, so break */
				break;
			}
		}
	}

	__set_bit(ke->keycode, dev->keybit);
	return 0;
}

/**
 * input_get_keycode - retrieve keycode currently mapped to a given scancode
 * @dev: input device which keymap is being queried
 * @ke: keymap entry
 *
 * This function should be called by anyone interested in retrieving current
 * keymap. Presently evdev handlers use it.
 */
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
	guard(spinlock_irqsave)(&dev->event_lock);

	return dev->getkeycode(dev, ke);
}
EXPORT_SYMBOL(input_get_keycode);

/**
 * input_set_keycode - attribute a keycode to a given scancode
 * @dev: input device which keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update current
 * keymap. Presently keyboard and evdev handlers use it.
 */
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned int old_keycode;
	int error;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	guard(spinlock_irqsave)(&dev->event_lock);

	error = dev->setkeycode(dev, ke, &old_keycode);
	if (error)
		return error;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (old_keycode > KEY_MAX) {
		dev_warn(dev->dev.parent ?: &dev->dev,
			 "%s: got too big old keycode %#x\n",
			 __func__, old_keycode);
	} else if (test_bit(EV_KEY, dev->evbit) &&
		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
		   __test_and_clear_bit(old_keycode, dev->key)) {
		/*
		 * We have to use input_event_dispose() here directly instead
		 * of input_handle_event() because the key we want to release
		 * here is considered no longer supported by the device and
		 * input_handle_event() will ignore it.
		 */
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
				    EV_KEY, old_keycode, 0);
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
				    EV_SYN, SYN_REPORT, 1);
	}

	return 0;
}
EXPORT_SYMBOL(input_set_keycode);

bool input_match_device_id(const struct input_dev *dev,
			   const struct input_device_id *id)
{
	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
		if (id->bustype != dev->id.bustype)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
		if (id->vendor != dev->id.vendor)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
		if (id->product != dev->id.product)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
		if (id->version != dev->id.version)
			return false;

	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(input_match_device_id);

static const struct input_device_id *input_match_device(struct input_handler *handler,
							 struct input_dev *dev)
{
	const struct input_device_id *id;

	for (id = handler->id_table; id->flags; id++) {
		if (input_match_device_id(dev, id) &&
		    (!handler->match || handler->match(handler, dev))) {
			return id;
		}
	}

	return NULL;
}

static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);

	return error;
}
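/*
 * Illustration (a hedged sketch, not a table used by the core itself): a
 * handler interested only in devices that report EV_KEY events would
 * typically describe that with an id table such as:
 *
 *	static const struct input_device_id foo_ids[] = {	// hypothetical
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ },		// terminating entry
 *	};
 *
 * input_match_device_id() then checks that the requested bits are a subset
 * of what the device advertises.
 */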
#ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_bus_input_dir;
static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
static int input_devices_state;

static inline void input_wakeup_procfs_readers(void)
{
	input_devices_state++;
	wake_up(&input_devices_poll_wait);
}

struct input_seq_state {
	unsigned short pos;
	bool mutex_acquired;
	int input_devices_state;
};

static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;
	struct input_seq_state *state = seq->private;

	poll_wait(file, &input_devices_poll_wait, wait);
	if (state->input_devices_state != input_devices_state) {
		state->input_devices_state = input_devices_state;
		return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct input_seq_state *state = seq->private;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;

	return seq_list_start(&input_dev_list, *pos);
}

static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &input_dev_list, pos);
}

static void input_seq_stop(struct seq_file *seq, void *v)
{
	struct input_seq_state *state = seq->private;

	if (state->mutex_acquired)
		mutex_unlock(&input_mutex);
}

static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
				   unsigned long *bitmap, int max)
{
	int i;
	bool skip_empty = true;
	char buf[18];

	seq_printf(seq, "B: %s=", name);

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		if (input_bits_to_string(buf, sizeof(buf),
					 bitmap[i], skip_empty)) {
			skip_empty = false;
			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (skip_empty)
		seq_putc(seq, '0');

	seq_putc(seq, '\n');
}

static int input_devices_seq_show(struct seq_file *seq, void *v)
{
	struct input_dev *dev = container_of(v, struct input_dev, node);
	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	struct input_handle *handle;

	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);

	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
	seq_puts(seq, "H: Handlers=");

	list_for_each_entry(handle, &dev->h_list, d_node)
		seq_printf(seq, "%s ", handle->name);
	seq_putc(seq, '\n');

	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);

	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);

	seq_putc(seq, '\n');

	kfree(path);
	return 0;
}

static const struct seq_operations input_devices_seq_ops = {
	.start	= input_devices_seq_start,
	.next	= input_devices_seq_next,
	.stop	= input_seq_stop,
	.show	= input_devices_seq_show,
};

static int input_proc_devices_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &input_devices_seq_ops,
				sizeof(struct input_seq_state));
}

static const struct proc_ops input_devices_proc_ops = {
	.proc_open	= input_proc_devices_open,
	.proc_poll	= input_proc_devices_poll,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct input_seq_state *state = seq->private;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;
	state->pos = *pos;

	return seq_list_start(&input_handler_list, *pos);
}

static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct input_seq_state *state = seq->private;

	state->pos = *pos + 1;
	return seq_list_next(v, &input_handler_list, pos);
}

static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
	struct input_handler *handler = container_of(v, struct input_handler, node);
	struct input_seq_state *state = seq->private;

	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
	if (handler->filter)
		seq_puts(seq, " (filter)");
	if (handler->legacy_minors)
		seq_printf(seq, " Minor=%d", handler->minor);
	seq_putc(seq, '\n');

	return 0;
}

static const struct seq_operations input_handlers_seq_ops = {
	.start	= input_handlers_seq_start,
	.next	= input_handlers_seq_next,
	.stop	= input_seq_stop,
	.show	= input_handlers_seq_show,
};

static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &input_handlers_seq_ops,
				sizeof(struct input_seq_state));
}

static const struct proc_ops input_handlers_proc_ops = {
	.proc_open	= input_proc_handlers_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static int __init input_proc_init(void)
{
	struct proc_dir_entry *entry;

	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
	if (!proc_bus_input_dir)
		return -ENOMEM;

	entry = proc_create("devices", 0, proc_bus_input_dir,
			    &input_devices_proc_ops);
	if (!entry)
		goto fail1;

	entry = proc_create("handlers", 0, proc_bus_input_dir,
			    &input_handlers_proc_ops);
	if (!entry)
		goto fail2;

	return 0;

 fail2:	remove_proc_entry("devices", proc_bus_input_dir);
 fail1:	remove_proc_entry("bus/input", NULL);
	return -ENOMEM;
}

static void input_proc_exit(void)
{
	remove_proc_entry("devices", proc_bus_input_dir);
	remove_proc_entry("handlers", proc_bus_input_dir);
	remove_proc_entry("bus/input", NULL);
}

#else /* !CONFIG_PROC_FS */
static inline void input_wakeup_procfs_readers(void) { }
static inline int input_proc_init(void) { return 0; }
static inline void input_proc_exit(void) { }
#endif

#define INPUT_DEV_STRING_ATTR_SHOW(name)				\
static ssize_t input_dev_show_##name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
									\
	return sysfs_emit(buf, "%s\n",					\
			  input_dev->name ? input_dev->name : "");	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)

INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);

static int input_print_modalias_bits(char *buf, int size,
				     char name, const unsigned long *bm,
				     unsigned int min_bit, unsigned int max_bit)
{
	int bit = min_bit;
	int len = 0;

	len += snprintf(buf, max(size, 0), "%c", name);
	for_each_set_bit_from(bit, bm, max_bit)
		len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
	return len;
}

static int input_print_modalias_parts(char *buf, int size, int full_len,
				      const struct input_dev *id)
{
	int len, klen, remainder, space;

	len = snprintf(buf, max(size, 0),
		       "input:b%04Xv%04Xp%04Xe%04X-",
		       id->id.bustype, id->id.vendor,
		       id->id.product, id->id.version);

	len += input_print_modalias_bits(buf + len, size - len,
					 'e', id->evbit, 0, EV_MAX);

	/*
	 * Calculate the remaining space in the buffer making sure we
	 * have place for the terminating 0.
	 */
	space = max(size - (len + 1), 0);

	klen = input_print_modalias_bits(buf + len, size - len,
					 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += klen;

	/*
	 * If we have more data than we can fit in the buffer, check
	 * if we can trim key data to fit in the rest. We will indicate
	 * that key data is incomplete by adding "+" sign at the end, like
	 * this: "k1,2,3,45,+,".
	 *
	 * Note that the shortest key info (if present) is "k+," so we
	 * can only try to trim if key data is longer than that.
	 */
	if (full_len && size < full_len + 1 && klen > 3) {
		remainder = full_len - len;
		/*
		 * We can only trim if we have space for the remainder
		 * and also for at least "k+," which is 3 more characters.
		 */
		if (remainder <= space - 3) {
			/*
			 * We are guaranteed to have 'k' in the buffer, so
			 * we need at least 3 additional bytes for storing
			 * "+," in addition to the remainder.
			 */
			for (int i = size - 1 - remainder - 3; i >= 0; i--) {
				if (buf[i] == 'k' || buf[i] == ',') {
					strcpy(buf + i + 1, "+,");
					len = i + 3; /* Not counting '\0' */
					break;
				}
			}
		}
	}

	len += input_print_modalias_bits(buf + len, size - len,
					 'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
					 'w', id->swbit, 0, SW_MAX);

	return len;
}

static int input_print_modalias(char *buf, int size, const struct input_dev *id)
{
	int full_len;

	/*
	 * Printing is done in 2 passes: first one figures out total length
	 * needed for the modalias string, second one will try to trim key
	 * data in case when buffer is too small for the entire modalias.
	 * If the buffer is too small regardless, it will fill as much as it
	 * can (without trimming key data) into the buffer and leave it to
	 * the caller to figure out what to do with the result.
	 */
	full_len = input_print_modalias_parts(NULL, 0, 0, id);
	return input_print_modalias_parts(buf, size, full_len, id);
}

static ssize_t input_dev_show_modalias(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct input_dev *id = to_input_dev(dev);
	ssize_t len;

	len = input_print_modalias(buf, PAGE_SIZE, id);
	if (len < PAGE_SIZE - 2)
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr);

static ssize_t input_dev_show_properties(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);
	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
				     INPUT_PROP_MAX, true);
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);

static int input_inhibit_device(struct input_dev *dev);
static int input_uninhibit_device(struct input_dev *dev);

static ssize_t inhibited_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);

	return sysfs_emit(buf, "%d\n", input_dev->inhibited);
}

static ssize_t inhibited_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t len)
{
	struct input_dev *input_dev = to_input_dev(dev);
	ssize_t rv;
	bool inhibited;

	if (kstrtobool(buf, &inhibited))
		return -EINVAL;

	if (inhibited)
		rv = input_inhibit_device(input_dev);
	else
		rv = input_uninhibit_device(input_dev);

	if (rv != 0)
		return rv;

	return len;
}

static DEVICE_ATTR_RW(inhibited);

static struct attribute *input_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_phys.attr,
	&dev_attr_uniq.attr,
	&dev_attr_modalias.attr,
	&dev_attr_properties.attr,
	&dev_attr_inhibited.attr,
	NULL
};

static const struct attribute_group input_dev_attr_group = {
	.attrs	= input_dev_attrs,
};

#define INPUT_DEV_ID_ATTR(name)						\
static ssize_t input_dev_show_id_##name(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	return sysfs_emit(buf, "%04x\n", input_dev->id.name);		\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)

INPUT_DEV_ID_ATTR(bustype);
INPUT_DEV_ID_ATTR(vendor);
INPUT_DEV_ID_ATTR(product);
INPUT_DEV_ID_ATTR(version);

static struct attribute *input_dev_id_attrs[] = {
	&dev_attr_bustype.attr,
	&dev_attr_vendor.attr,
	&dev_attr_product.attr,
	&dev_attr_version.attr,
	NULL
};

static const struct attribute_group input_dev_id_attr_group = {
	.name	= "id",
	.attrs	= input_dev_id_attrs,
};

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr)
{
	int i;
	int len = 0;
	bool skip_empty = true;

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
					    bitmap[i], skip_empty);
		if (len) {
			skip_empty = false;
			if (i > 0)
				len += snprintf(buf + len, max(buf_size - len, 0), " ");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (len == 0)
		len = snprintf(buf, buf_size, "%d", 0);

	if (add_cr)
		len += snprintf(buf + len, max(buf_size - len, 0), "\n");

	return len;
}

#define INPUT_DEV_CAP_ATTR(ev, bm)					\
static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	int len = input_print_bitmap(buf, PAGE_SIZE,			\
				     input_dev->bm##bit, ev##_MAX,	\
				     true);				\
	return min_t(int, len, PAGE_SIZE);				\
}									\
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)

INPUT_DEV_CAP_ATTR(EV, ev);
INPUT_DEV_CAP_ATTR(KEY, key);
INPUT_DEV_CAP_ATTR(REL, rel);
INPUT_DEV_CAP_ATTR(ABS, abs);
INPUT_DEV_CAP_ATTR(MSC, msc);
INPUT_DEV_CAP_ATTR(LED, led);
INPUT_DEV_CAP_ATTR(SND, snd);
INPUT_DEV_CAP_ATTR(FF, ff);
INPUT_DEV_CAP_ATTR(SW, sw);

static struct attribute *input_dev_caps_attrs[] = {
	&dev_attr_ev.attr,
	&dev_attr_key.attr,
	&dev_attr_rel.attr,
	&dev_attr_abs.attr,
	&dev_attr_msc.attr,
	&dev_attr_led.attr,
	&dev_attr_snd.attr,
	&dev_attr_ff.attr,
	&dev_attr_sw.attr,
	NULL
};

static const struct attribute_group input_dev_caps_attr_group = {
	.name	= "capabilities",
	.attrs	= input_dev_caps_attrs,
};

static const struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	&input_poller_attribute_group,
	NULL
};

static void input_dev_release(struct device *device)
{
	struct input_dev *dev = to_input_dev(device);

	input_ff_destroy(dev);
	input_mt_destroy_slots(dev);
	kfree(dev->poller);
	kfree(dev->absinfo);
	kfree(dev->vals);
	kfree(dev);

	module_put(THIS_MODULE);
}

/*
 * Input uevent interface - loading event handlers based on
 * device bitfields.
 */
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
				   const char *name, const unsigned long *bitmap, int max)
{
	int len;

	if (add_uevent_var(env, "%s", name))
		return -ENOMEM;

	len = input_print_bitmap(&env->buf[env->buflen - 1],
				 sizeof(env->buf) - env->buflen,
				 bitmap, max, false);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

/*
 * This is a pretty gross hack. When building uevent data the driver core
 * may try adding more environment variables to kobj_uevent_env without
 * telling us, so we have no idea how much of the buffer we can use to
 * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
 * reduce amount of memory we will use for the modalias environment variable.
 *
 * The potential additions are:
 *
 * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
 * HOME=/ (6 bytes)
 * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
 *
 * 68 bytes total. Allow extra buffer - 96 bytes.
 */
#define UEVENT_ENV_EXTRA_LEN		96

static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
					 const struct input_dev *dev)
{
	int len;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	len = input_print_modalias(&env->buf[env->buflen - 1],
				   (int)sizeof(env->buf) - env->buflen -
					UEVENT_ENV_EXTRA_LEN,
				   dev);
	if (len >= ((int)sizeof(env->buf) - env->buflen -
					UEVENT_ENV_EXTRA_LEN))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

#define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
	do {								\
		int err = input_add_uevent_bm_var(env, name, bm, max);	\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
	do {								\
		int err = input_add_uevent_modalias_var(env, dev);	\
		if (err)						\
			return err;					\
	} while (0)

static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct input_dev *dev = to_input_dev(device);

	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
			      dev->id.bustype, dev->id.vendor,
			      dev->id.product, dev->id.version);
	if (dev->name)
		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
	if (dev->phys)
		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
	if (dev->uniq)
		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);

	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);

	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);

	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);

	return 0;
}

#define INPUT_DO_TOGGLE(dev, type, bits, on)				\
	do {								\
		int i;							\
		bool active;						\
									\
		if (!test_bit(EV_##type, dev->evbit))			\
			break;						\
									\
		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
			active = test_bit(i, dev->bits);		\
			if (!active && !on)				\
				continue;				\
									\
			dev->event(dev, EV_##type, i, on ? active : 0);	\
		}							\
	} while (0)

static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;

	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);

	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}

/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
 */
void input_reset_device(struct input_dev *dev)
{
	guard(mutex)(&dev->mutex);
	guard(spinlock_irqsave)(&dev->event_lock);

	input_dev_toggle(dev, true);
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
}
EXPORT_SYMBOL(input_reset_device);

static int input_inhibit_device(struct input_dev *dev)
{
	guard(mutex)(&dev->mutex);

	if (dev->inhibited)
		return 0;

	if (dev->users) {
		if (dev->close)
			dev->close(dev);
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
	}

	scoped_guard(spinlock_irq, &dev->event_lock) {
		input_mt_release_slots(dev);
		input_dev_release_keys(dev);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
		input_dev_toggle(dev, false);
	}

	dev->inhibited = true;

	return 0;
}

static int input_uninhibit_device(struct input_dev *dev)
{
	int error;

	guard(mutex)(&dev->mutex);

	if (!dev->inhibited)
		return 0;

	if (dev->users) {
		if (dev->open) {
			error = dev->open(dev);
			if (error)
				return error;
		}
		if (dev->poller)
			input_dev_poller_start(dev->poller);
	}

	dev->inhibited = false;

	scoped_guard(spinlock_irq, &dev->event_lock)
		input_dev_toggle(dev, true);

	return 0;
}

static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	guard(spinlock_irq)(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	return 0;
}

static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	guard(spinlock_irq)(&input_dev->event_lock);

	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);

	return 0;
}

static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	guard(spinlock_irq)(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	return 0;
}

static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	guard(spinlock_irq)(&input_dev->event_lock);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	return 0;
}

static const struct dev_pm_ops input_dev_pm_ops = {
	.suspend	= input_dev_suspend,
	.resume		= input_dev_resume,
	.freeze		= input_dev_freeze,
	.poweroff	= input_dev_poweroff,
	.restore	= input_dev_resume,
};

static const struct device_type input_dev_type = {
	.groups		= input_dev_attr_groups,
	.release	= input_dev_release,
	.uevent		= input_dev_uevent,
	.pm		= pm_sleep_ptr(&input_dev_pm_ops),
};

static char *input_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}

const struct class input_class = {
	.name		= "input",
	.devnode	= input_devnode,
};
EXPORT_SYMBOL_GPL(input_class);

/**
 * input_allocate_device - allocate memory for new input device
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * NOTE: Use input_free_device() to free devices that have not been
 * registered; input_unregister_device() should be used for already
 * registered devices.
 */
struct input_dev *input_allocate_device(void)
{
	static atomic_t input_no = ATOMIC_INIT(-1);
	struct input_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/*
	 * Start with space for SYN_REPORT + 7 EV_KEY/EV_MSC events + 2 spare,
	 * see input_estimate_events_per_packet(). We will tune the number
	 * when we register the device.
	 */
	dev->max_vals = 10;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		kfree(dev);
		return NULL;
	}

	mutex_init(&dev->mutex);
	spin_lock_init(&dev->event_lock);
	timer_setup(&dev->timer, NULL, 0);
	INIT_LIST_HEAD(&dev->h_list);
	INIT_LIST_HEAD(&dev->node);

	dev->dev.type = &input_dev_type;
	dev->dev.class = &input_class;
	device_initialize(&dev->dev);
	/*
	 * From this point on we can no longer simply "kfree(dev)", we need
	 * to use input_free_device() so that device core properly frees its
	 * resources associated with the input device.
	 */

	dev_set_name(&dev->dev, "input%lu",
		     (unsigned long)atomic_inc_return(&input_no));

	__module_get(THIS_MODULE);

	return dev;
}
EXPORT_SYMBOL(input_allocate_device);

struct input_devres {
	struct input_dev *input;
};

static int devm_input_device_match(struct device *dev, void *res, void *data)
{
	struct input_devres *devres = res;

	return devres->input == data;
}

static void devm_input_device_release(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: dropping reference to %s\n",
		__func__, dev_name(&input->dev));
	input_put_device(input);
}

/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed as it will be done automatically when owner device unbinds from
 * its driver (or binding fails). Once managed input device is allocated,
 * it is ready to be set up and registered in the same fashion as regular
 * input device. There are no special devm_input_device_[un]register()
 * variants, regular ones work with both managed and unmanaged devices,
 * should you need them. In most cases, however, a managed input device
 * need not be explicitly unregistered or freed.
 *
 * NOTE: the owner device is set up as parent of input device and users
 * should not override it.
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;

	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;

	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}

	input->dev.parent = dev;
	input->devres_managed = true;

	devres->input = input;
	devres_add(dev, devres);

	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);

/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once device was registered
 * use input_unregister_device() and memory will be freed once last
 * reference to the device is dropped.
 *
 * Device should be allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
					       devm_input_device_release,
					       devm_input_device_match,
					       dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);
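/*
 * Illustration (a condensed sketch with hypothetical foo_* names): a typical
 * managed allocation in a driver probe() looks roughly like:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "foo keys";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 *
 * No explicit unregister/free is needed on the error or remove paths.
 */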
The driver should 2030 * call this function as soon as a timestamp is acquired ensuring 2031 * clock conversions in input_set_timestamp are done correctly. 2032 * 2033 * The system entering suspend state between timestamp acquisition and 2034 * calling input_set_timestamp can result in inaccurate conversions. 2035 */ 2036 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp) 2037 { 2038 dev->timestamp[INPUT_CLK_MONO] = timestamp; 2039 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp); 2040 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp, 2041 TK_OFFS_BOOT); 2042 } 2043 EXPORT_SYMBOL(input_set_timestamp); 2044 2045 /** 2046 * input_get_timestamp - get timestamp for input events 2047 * @dev: input device to get timestamp from 2048 * 2049 * A valid timestamp is a timestamp of non-zero value. 2050 */ 2051 ktime_t *input_get_timestamp(struct input_dev *dev) 2052 { 2053 const ktime_t invalid_timestamp = ktime_set(0, 0); 2054 2055 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp)) 2056 input_set_timestamp(dev, ktime_get()); 2057 2058 return dev->timestamp; 2059 } 2060 EXPORT_SYMBOL(input_get_timestamp); 2061 2062 /** 2063 * input_set_capability - mark device as capable of a certain event 2064 * @dev: device that is capable of emitting or accepting event 2065 * @type: type of the event (EV_KEY, EV_REL, etc...) 2066 * @code: event code 2067 * 2068 * In addition to setting up corresponding bit in appropriate capability 2069 * bitmap the function also adjusts dev->evbit. 2070 */ 2071 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) 2072 { 2073 if (type < EV_CNT && input_max_code[type] && 2074 code > input_max_code[type]) { 2075 pr_err("%s: invalid code %u for type %u\n", __func__, code, 2076 type); 2077 dump_stack(); 2078 return; 2079 } 2080 2081 switch (type) { 2082 case EV_KEY: 2083 __set_bit(code, dev->keybit); 2084 break; 2085 2086 case EV_REL: 2087 __set_bit(code, dev->relbit); 2088 break; 2089 2090 case EV_ABS: 2091 input_alloc_absinfo(dev); 2092 __set_bit(code, dev->absbit); 2093 break; 2094 2095 case EV_MSC: 2096 __set_bit(code, dev->mscbit); 2097 break; 2098 2099 case EV_SW: 2100 __set_bit(code, dev->swbit); 2101 break; 2102 2103 case EV_LED: 2104 __set_bit(code, dev->ledbit); 2105 break; 2106 2107 case EV_SND: 2108 __set_bit(code, dev->sndbit); 2109 break; 2110 2111 case EV_FF: 2112 __set_bit(code, dev->ffbit); 2113 break; 2114 2115 case EV_PWR: 2116 /* do nothing */ 2117 break; 2118 2119 default: 2120 pr_err("%s: unknown type %u (code %u)\n", __func__, type, code); 2121 dump_stack(); 2122 return; 2123 } 2124 2125 __set_bit(type, dev->evbit); 2126 } 2127 EXPORT_SYMBOL(input_set_capability); 2128 2129 static unsigned int input_estimate_events_per_packet(struct input_dev *dev) 2130 { 2131 int mt_slots; 2132 int i; 2133 unsigned int events; 2134 2135 if (dev->mt) { 2136 mt_slots = dev->mt->num_slots; 2137 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 2138 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 2139 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1; 2140 mt_slots = clamp(mt_slots, 2, 32); 2141 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 2142 mt_slots = 2; 2143 } else { 2144 mt_slots = 0; 2145 } 2146 2147 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ 2148 2149 if (test_bit(EV_ABS, dev->evbit)) 2150 for_each_set_bit(i, dev->absbit, ABS_CNT) 2151 events += input_is_mt_axis(i) ? 
mt_slots : 1; 2152 2153 if (test_bit(EV_REL, dev->evbit)) 2154 events += bitmap_weight(dev->relbit, REL_CNT); 2155 2156 /* Make room for KEY and MSC events */ 2157 events += 7; 2158 2159 return events; 2160 } 2161 2162 #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ 2163 do { \ 2164 if (!test_bit(EV_##type, dev->evbit)) \ 2165 memset(dev->bits##bit, 0, \ 2166 sizeof(dev->bits##bit)); \ 2167 } while (0) 2168 2169 static void input_cleanse_bitmasks(struct input_dev *dev) 2170 { 2171 INPUT_CLEANSE_BITMASK(dev, KEY, key); 2172 INPUT_CLEANSE_BITMASK(dev, REL, rel); 2173 INPUT_CLEANSE_BITMASK(dev, ABS, abs); 2174 INPUT_CLEANSE_BITMASK(dev, MSC, msc); 2175 INPUT_CLEANSE_BITMASK(dev, LED, led); 2176 INPUT_CLEANSE_BITMASK(dev, SND, snd); 2177 INPUT_CLEANSE_BITMASK(dev, FF, ff); 2178 INPUT_CLEANSE_BITMASK(dev, SW, sw); 2179 } 2180 2181 static void __input_unregister_device(struct input_dev *dev) 2182 { 2183 struct input_handle *handle, *next; 2184 2185 input_disconnect_device(dev); 2186 2187 scoped_guard(mutex, &input_mutex) { 2188 list_for_each_entry_safe(handle, next, &dev->h_list, d_node) 2189 handle->handler->disconnect(handle); 2190 WARN_ON(!list_empty(&dev->h_list)); 2191 2192 timer_delete_sync(&dev->timer); 2193 list_del_init(&dev->node); 2194 2195 input_wakeup_procfs_readers(); 2196 } 2197 2198 device_del(&dev->dev); 2199 } 2200 2201 static void devm_input_device_unregister(struct device *dev, void *res) 2202 { 2203 struct input_devres *devres = res; 2204 struct input_dev *input = devres->input; 2205 2206 dev_dbg(dev, "%s: unregistering device %s\n", 2207 __func__, dev_name(&input->dev)); 2208 __input_unregister_device(input); 2209 } 2210 2211 /* 2212 * Generate software autorepeat event. Note that we take 2213 * dev->event_lock here to avoid racing with input_event 2214 * which may cause keys get "stuck". 2215 */ 2216 static void input_repeat_key(struct timer_list *t) 2217 { 2218 struct input_dev *dev = timer_container_of(dev, t, timer); 2219 2220 guard(spinlock_irqsave)(&dev->event_lock); 2221 2222 if (!dev->inhibited && 2223 test_bit(dev->repeat_key, dev->key) && 2224 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 2225 2226 input_set_timestamp(dev, ktime_get()); 2227 input_handle_event(dev, EV_KEY, dev->repeat_key, 2); 2228 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 2229 2230 if (dev->rep[REP_PERIOD]) 2231 mod_timer(&dev->timer, jiffies + 2232 msecs_to_jiffies(dev->rep[REP_PERIOD])); 2233 } 2234 } 2235 2236 /** 2237 * input_enable_softrepeat - enable software autorepeat 2238 * @dev: input device 2239 * @delay: repeat delay 2240 * @period: repeat period 2241 * 2242 * Enable software autorepeat on the input device. 
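 *
 * If dev->rep[REP_DELAY] and dev->rep[REP_PERIOD] are both left at zero,
 * input_register_device() calls this helper with defaults of 250 ms delay
 * and 33 ms period. A driver wanting a slower repeat could, for example,
 * call (values purely illustrative):
 *
 *	input_enable_softrepeat(dev, 500, 125);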
2243  */
2244 void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2245 {
2246 	dev->timer.function = input_repeat_key;
2247 	dev->rep[REP_DELAY] = delay;
2248 	dev->rep[REP_PERIOD] = period;
2249 }
2250 EXPORT_SYMBOL(input_enable_softrepeat);
2251 
2252 bool input_device_enabled(struct input_dev *dev)
2253 {
2254 	lockdep_assert_held(&dev->mutex);
2255 
2256 	return !dev->inhibited && dev->users > 0;
2257 }
2258 EXPORT_SYMBOL_GPL(input_device_enabled);
2259 
2260 static int input_device_tune_vals(struct input_dev *dev)
2261 {
2262 	struct input_value *vals;
2263 	unsigned int packet_size;
2264 	unsigned int max_vals;
2265 
2266 	packet_size = input_estimate_events_per_packet(dev);
2267 	if (dev->hint_events_per_packet < packet_size)
2268 		dev->hint_events_per_packet = packet_size;
2269 
2270 	max_vals = dev->hint_events_per_packet + 2;
2271 	if (dev->max_vals >= max_vals)
2272 		return 0;
2273 
2274 	vals = kcalloc(max_vals, sizeof(*vals), GFP_KERNEL);
2275 	if (!vals)
2276 		return -ENOMEM;
2277 
2278 	scoped_guard(spinlock_irq, &dev->event_lock) {
2279 		dev->max_vals = max_vals;
2280 		swap(dev->vals, vals);
2281 	}
2282 
2283 	/* Because of swap() above, this frees the old vals memory */
2284 	kfree(vals);
2285 
2286 	return 0;
2287 }
2288 
2289 /**
2290  * input_register_device - register device with input core
2291  * @dev: device to be registered
2292  *
2293  * This function registers the device with the input core. The device must be
2294  * allocated with input_allocate_device() and all its capabilities
2295  * set up before registering.
2296  * If the function fails, the device must be freed with input_free_device().
2297  * Once the device has been successfully registered it can be unregistered
2298  * with input_unregister_device(); input_free_device() should not be
2299  * called in this case.
2300  *
2301  * Note that this function is also used to register managed input devices
2302  * (ones allocated with devm_input_allocate_device()). Such managed input
2303  * devices need not be explicitly unregistered or freed; their tear down
2304  * is controlled by the devres infrastructure. It is also worth noting
2305  * that tear down of managed input devices is internally a 2-step process:
2306  * a registered managed input device is first unregistered, but stays in
2307  * memory and can still handle input_event() calls (although events will
2308  * not be delivered anywhere). The freeing of the managed input device will
2309  * happen later, when the devres stack is unwound to the point where the
2310  * device allocation was made.
2311  */
2312 int input_register_device(struct input_dev *dev)
2313 {
2314 	struct input_devres *devres = NULL;
2315 	struct input_handler *handler;
2316 	const char *path;
2317 	int error;
2318 
2319 	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2320 		dev_err(&dev->dev,
2321 			"Absolute device without dev->absinfo, refusing to register\n");
2322 		return -EINVAL;
2323 	}
2324 
2325 	if (dev->devres_managed) {
2326 		devres = devres_alloc(devm_input_device_unregister,
2327 				      sizeof(*devres), GFP_KERNEL);
2328 		if (!devres)
2329 			return -ENOMEM;
2330 
2331 		devres->input = dev;
2332 	}
2333 
2334 	/* Every input device generates EV_SYN/SYN_REPORT events. */
2335 	__set_bit(EV_SYN, dev->evbit);
2336 
2337 	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
2338 	__clear_bit(KEY_RESERVED, dev->keybit);
2339 
2340 	/* Make sure that bitmasks not mentioned in dev->evbit are clean.
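	 * For example, if a driver set bits in dev->keybit but did not set
	 * EV_KEY in dev->evbit, input_cleanse_bitmasks() below zeroes
	 * dev->keybit so that handlers never see capabilities the device
	 * cannot actually report.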
*/ 2341 input_cleanse_bitmasks(dev); 2342 2343 error = input_device_tune_vals(dev); 2344 if (error) 2345 goto err_devres_free; 2346 2347 /* 2348 * If delay and period are pre-set by the driver, then autorepeating 2349 * is handled by the driver itself and we don't do it in input.c. 2350 */ 2351 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) 2352 input_enable_softrepeat(dev, 250, 33); 2353 2354 if (!dev->getkeycode) 2355 dev->getkeycode = input_default_getkeycode; 2356 2357 if (!dev->setkeycode) 2358 dev->setkeycode = input_default_setkeycode; 2359 2360 if (dev->poller) 2361 input_dev_poller_finalize(dev->poller); 2362 2363 error = device_add(&dev->dev); 2364 if (error) 2365 goto err_devres_free; 2366 2367 path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 2368 pr_info("%s as %s\n", 2369 dev->name ? dev->name : "Unspecified device", 2370 path ? path : "N/A"); 2371 kfree(path); 2372 2373 error = -EINTR; 2374 scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) { 2375 list_add_tail(&dev->node, &input_dev_list); 2376 2377 list_for_each_entry(handler, &input_handler_list, node) 2378 input_attach_handler(dev, handler); 2379 2380 input_wakeup_procfs_readers(); 2381 } 2382 2383 if (dev->devres_managed) { 2384 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n", 2385 __func__, dev_name(&dev->dev)); 2386 devres_add(dev->dev.parent, devres); 2387 } 2388 return 0; 2389 2390 err_device_del: 2391 device_del(&dev->dev); 2392 err_devres_free: 2393 devres_free(devres); 2394 return error; 2395 } 2396 EXPORT_SYMBOL(input_register_device); 2397 2398 /** 2399 * input_unregister_device - unregister previously registered device 2400 * @dev: device to be unregistered 2401 * 2402 * This function unregisters an input device. Once device is unregistered 2403 * the caller should not try to access it as it may get freed at any moment. 2404 */ 2405 void input_unregister_device(struct input_dev *dev) 2406 { 2407 if (dev->devres_managed) { 2408 WARN_ON(devres_destroy(dev->dev.parent, 2409 devm_input_device_unregister, 2410 devm_input_device_match, 2411 dev)); 2412 __input_unregister_device(dev); 2413 /* 2414 * We do not do input_put_device() here because it will be done 2415 * when 2nd devres fires up. 2416 */ 2417 } else { 2418 __input_unregister_device(dev); 2419 input_put_device(dev); 2420 } 2421 } 2422 EXPORT_SYMBOL(input_unregister_device); 2423 2424 static int input_handler_check_methods(const struct input_handler *handler) 2425 { 2426 int count = 0; 2427 2428 if (handler->filter) 2429 count++; 2430 if (handler->events) 2431 count++; 2432 if (handler->event) 2433 count++; 2434 2435 if (count > 1) { 2436 pr_err("%s: only one event processing method can be defined (%s)\n", 2437 __func__, handler->name); 2438 return -EINVAL; 2439 } 2440 2441 return 0; 2442 } 2443 2444 /** 2445 * input_register_handler - register a new input handler 2446 * @handler: handler to be registered 2447 * 2448 * This function registers a new input handler (interface) for input 2449 * devices in the system and attaches it to all input devices that 2450 * are compatible with the handler. 
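 *
 * A minimal registration might look like the sketch below (the
 * "example_*" names are purely illustrative and not part of this file):
 *
 *	static struct input_handler example_handler = {
 *		.event		= example_event,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *		.name		= "example",
 *		.id_table	= example_ids,
 *	};
 *
 *	error = input_register_handler(&example_handler);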
2451  */
2452 int input_register_handler(struct input_handler *handler)
2453 {
2454 	struct input_dev *dev;
2455 	int error;
2456 
2457 	error = input_handler_check_methods(handler);
2458 	if (error)
2459 		return error;
2460 
2461 	scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) {
2462 		INIT_LIST_HEAD(&handler->h_list);
2463 
2464 		list_add_tail(&handler->node, &input_handler_list);
2465 
2466 		list_for_each_entry(dev, &input_dev_list, node)
2467 			input_attach_handler(dev, handler);
2468 
2469 		input_wakeup_procfs_readers();
2470 	}
2471 
2472 	return 0;
2473 }
2474 EXPORT_SYMBOL(input_register_handler);
2475 
2476 /**
2477  * input_unregister_handler - unregisters an input handler
2478  * @handler: handler to be unregistered
2479  *
2480  * This function disconnects a handler from its input devices and
2481  * removes it from the list of known handlers.
2482  */
2483 void input_unregister_handler(struct input_handler *handler)
2484 {
2485 	struct input_handle *handle, *next;
2486 
2487 	guard(mutex)(&input_mutex);
2488 
2489 	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2490 		handler->disconnect(handle);
2491 	WARN_ON(!list_empty(&handler->h_list));
2492 
2493 	list_del_init(&handler->node);
2494 
2495 	input_wakeup_procfs_readers();
2496 }
2497 EXPORT_SYMBOL(input_unregister_handler);
2498 
2499 /**
2500  * input_handler_for_each_handle - handle iterator
2501  * @handler: input handler to iterate
2502  * @data: data for the callback
2503  * @fn: function to be called for each handle
2504  *
2505  * Iterate over @handler's list of handles, and call @fn for each, passing
2506  * it @data, and stop when @fn returns a non-zero value. The function uses
2507  * RCU to traverse the list and therefore may be used in atomic
2508  * contexts. The @fn callback is invoked from an RCU critical section and
2509  * thus must not sleep.
2510  */
2511 int input_handler_for_each_handle(struct input_handler *handler, void *data,
2512 				  int (*fn)(struct input_handle *, void *))
2513 {
2514 	struct input_handle *handle;
2515 	int retval;
2516 
2517 	guard(rcu)();
2518 
2519 	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2520 		retval = fn(handle, data);
2521 		if (retval)
2522 			return retval;
2523 	}
2524 
2525 	return 0;
2526 }
2527 EXPORT_SYMBOL(input_handler_for_each_handle);
2528 
2529 /*
2530  * An implementation of input_handle's handle_events() method that simply
2531  * invokes the handler->event() method for each event, one by one.
2532  */
2533 static unsigned int input_handle_events_default(struct input_handle *handle,
2534 						struct input_value *vals,
2535 						unsigned int count)
2536 {
2537 	struct input_handler *handler = handle->handler;
2538 	struct input_value *v;
2539 
2540 	for (v = vals; v != vals + count; v++)
2541 		handler->event(handle, v->type, v->code, v->value);
2542 
2543 	return count;
2544 }
2545 
2546 /*
2547  * An implementation of input_handle's handle_events() method that invokes
2548  * the handler->filter() method for each event, one by one, and removes
2549  * events that were filtered out from the "vals" array.
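 * For example, with count == 3 and the filter dropping the second value,
 * the first and third values are compacted to the front of "vals" and 2
 * is returned, so handles further down the list only see the events that
 * survived filtering.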
2550  */
2551 static unsigned int input_handle_events_filter(struct input_handle *handle,
2552 					       struct input_value *vals,
2553 					       unsigned int count)
2554 {
2555 	struct input_handler *handler = handle->handler;
2556 	struct input_value *end = vals;
2557 	struct input_value *v;
2558 
2559 	for (v = vals; v != vals + count; v++) {
2560 		if (handler->filter(handle, v->type, v->code, v->value))
2561 			continue;
2562 		if (end != v)
2563 			*end = *v;
2564 		end++;
2565 	}
2566 
2567 	return end - vals;
2568 }
2569 
2570 /*
2571  * An implementation of input_handle's handle_events() method that does nothing.
2572  */
2573 static unsigned int input_handle_events_null(struct input_handle *handle,
2574 					     struct input_value *vals,
2575 					     unsigned int count)
2576 {
2577 	return count;
2578 }
2579 
2580 /*
2581  * Sets up the appropriate handle->handle_events callback based on the
2582  * input_handler associated with the handle.
2583  */
2584 static void input_handle_setup_event_handler(struct input_handle *handle)
2585 {
2586 	struct input_handler *handler = handle->handler;
2587 
2588 	if (handler->filter)
2589 		handle->handle_events = input_handle_events_filter;
2590 	else if (handler->event)
2591 		handle->handle_events = input_handle_events_default;
2592 	else if (handler->events)
2593 		handle->handle_events = handler->events;
2594 	else
2595 		handle->handle_events = input_handle_events_null;
2596 }
2597 
2598 /**
2599  * input_register_handle - register a new input handle
2600  * @handle: handle to register
2601  *
2602  * This function puts a new input handle onto the device's
2603  * and handler's lists so that events can flow through
2604  * it once it is opened using input_open_device().
2605  *
2606  * This function is supposed to be called from the handler's
2607  * connect() method.
2608  */
2609 int input_register_handle(struct input_handle *handle)
2610 {
2611 	struct input_handler *handler = handle->handler;
2612 	struct input_dev *dev = handle->dev;
2613 
2614 	input_handle_setup_event_handler(handle);
2615 	/*
2616 	 * We take dev->mutex here to prevent race with
2617 	 * input_release_device().
2618 	 */
2619 	scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) {
2620 		/*
2621 		 * Filters go to the head of the list, normal handlers
2622 		 * to the tail.
2623 		 */
2624 		if (handler->filter)
2625 			list_add_rcu(&handle->d_node, &dev->h_list);
2626 		else
2627 			list_add_tail_rcu(&handle->d_node, &dev->h_list);
2628 	}
2629 
2630 	/*
2631 	 * Since we are supposed to be called from ->connect(),
2632 	 * which is mutually exclusive with ->disconnect(),
2633 	 * we can't be racing with input_unregister_handle()
2634 	 * and so a separate lock is not needed here.
2635 	 */
2636 	list_add_tail_rcu(&handle->h_node, &handler->h_list);
2637 
2638 	if (handler->start)
2639 		handler->start(handle);
2640 
2641 	return 0;
2642 }
2643 EXPORT_SYMBOL(input_register_handle);
2644 
2645 /**
2646  * input_unregister_handle - unregister an input handle
2647  * @handle: handle to unregister
2648  *
2649  * This function removes the input handle from the device's
2650  * and handler's lists.
2651  *
2652  * This function is supposed to be called from the handler's
2653  * disconnect() method.
2654  */
2655 void input_unregister_handle(struct input_handle *handle)
2656 {
2657 	struct input_dev *dev = handle->dev;
2658 
2659 	list_del_rcu(&handle->h_node);
2660 
2661 	/*
2662 	 * Take dev->mutex to prevent race with input_release_device().
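	 * The synchronize_rcu() below then waits for any concurrent event
	 * processing that might still be traversing the lists under RCU,
	 * so the caller can safely free the handle once this function
	 * returns.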
2663 	 */
2664 	scoped_guard(mutex, &dev->mutex)
2665 		list_del_rcu(&handle->d_node);
2666 
2667 	synchronize_rcu();
2668 }
2669 EXPORT_SYMBOL(input_unregister_handle);
2670 
2671 /**
2672  * input_get_new_minor - allocates a new input minor number
2673  * @legacy_base: beginning of the legacy range to be searched
2674  * @legacy_num: size of the legacy range
2675  * @allow_dynamic: whether we can also take an ID from the dynamic range
2676  *
2677  * This function allocates a new device minor from the input major namespace.
2678  * The caller can request a legacy minor by specifying the @legacy_base and
2679  * @legacy_num parameters, and whether an ID can be allocated from the dynamic
2680  * range if there are no free IDs in the legacy range.
2681  */
2682 int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2683 			bool allow_dynamic)
2684 {
2685 	/*
2686 	 * This function should be called from input handlers' ->connect()
2687 	 * methods, which are serialized with input_mutex, so no additional
2688 	 * locking is needed here.
2689 	 */
2690 	if (legacy_base >= 0) {
2691 		int minor = ida_alloc_range(&input_ida, legacy_base,
2692 					    legacy_base + legacy_num - 1,
2693 					    GFP_KERNEL);
2694 		if (minor >= 0 || !allow_dynamic)
2695 			return minor;
2696 	}
2697 
2698 	return ida_alloc_range(&input_ida, INPUT_FIRST_DYNAMIC_DEV,
2699 			       INPUT_MAX_CHAR_DEVICES - 1, GFP_KERNEL);
2700 }
2701 EXPORT_SYMBOL(input_get_new_minor);
2702 
2703 /**
2704  * input_free_minor - release previously allocated minor
2705  * @minor: minor to be released
2706  *
2707  * This function releases a previously allocated input minor so that it can
2708  * be reused later.
2709  */
2710 void input_free_minor(unsigned int minor)
2711 {
2712 	ida_free(&input_ida, minor);
2713 }
2714 EXPORT_SYMBOL(input_free_minor);
2715 
2716 static int __init input_init(void)
2717 {
2718 	int err;
2719 
2720 	err = class_register(&input_class);
2721 	if (err) {
2722 		pr_err("unable to register input_dev class\n");
2723 		return err;
2724 	}
2725 
2726 	err = input_proc_init();
2727 	if (err)
2728 		goto fail1;
2729 
2730 	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2731 				     INPUT_MAX_CHAR_DEVICES, "input");
2732 	if (err) {
2733 		pr_err("unable to register char major %d\n", INPUT_MAJOR);
2734 		goto fail2;
2735 	}
2736 
2737 	return 0;
2738 
2739  fail2:	input_proc_exit();
2740  fail1:	class_unregister(&input_class);
2741 	return err;
2742 }
2743 
2744 static void __exit input_exit(void)
2745 {
2746 	input_proc_exit();
2747 	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2748 				 INPUT_MAX_CHAR_DEVICES);
2749 	class_unregister(&input_class);
2750 }
2751 
2752 subsys_initcall(input_init);
2753 module_exit(input_exit);
2754 