1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * The input core 4 * 5 * Copyright (c) 1999-2002 Vojtech Pavlik 6 */ 7 8 9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/types.h> 13 #include <linux/idr.h> 14 #include <linux/input/mt.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/major.h> 19 #include <linux/proc_fs.h> 20 #include <linux/sched.h> 21 #include <linux/seq_file.h> 22 #include <linux/poll.h> 23 #include <linux/device.h> 24 #include <linux/mutex.h> 25 #include <linux/rcupdate.h> 26 #include "input-compat.h" 27 #include "input-poller.h" 28 29 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); 30 MODULE_DESCRIPTION("Input core"); 31 MODULE_LICENSE("GPL"); 32 33 #define INPUT_MAX_CHAR_DEVICES 1024 34 #define INPUT_FIRST_DYNAMIC_DEV 256 35 static DEFINE_IDA(input_ida); 36 37 static LIST_HEAD(input_dev_list); 38 static LIST_HEAD(input_handler_list); 39 40 /* 41 * input_mutex protects access to both input_dev_list and input_handler_list. 42 * This also causes input_[un]register_device and input_[un]register_handler 43 * be mutually exclusive which simplifies locking in drivers implementing 44 * input handlers. 45 */ 46 static DEFINE_MUTEX(input_mutex); 47 48 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; 49 50 static inline int is_event_supported(unsigned int code, 51 unsigned long *bm, unsigned int max) 52 { 53 return code <= max && test_bit(code, bm); 54 } 55 56 static int input_defuzz_abs_event(int value, int old_val, int fuzz) 57 { 58 if (fuzz) { 59 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2) 60 return old_val; 61 62 if (value > old_val - fuzz && value < old_val + fuzz) 63 return (old_val * 3 + value) / 4; 64 65 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2) 66 return (old_val + value) / 2; 67 } 68 69 return value; 70 } 71 72 static void input_start_autorepeat(struct input_dev *dev, int code) 73 { 74 if (test_bit(EV_REP, dev->evbit) && 75 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && 76 dev->timer.function) { 77 dev->repeat_key = code; 78 mod_timer(&dev->timer, 79 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); 80 } 81 } 82 83 static void input_stop_autorepeat(struct input_dev *dev) 84 { 85 del_timer(&dev->timer); 86 } 87 88 /* 89 * Pass event first through all filters and then, if event has not been 90 * filtered out, through all open handles. This function is called with 91 * dev->event_lock held and interrupts disabled. 92 */ 93 static unsigned int input_to_handler(struct input_handle *handle, 94 struct input_value *vals, unsigned int count) 95 { 96 struct input_handler *handler = handle->handler; 97 struct input_value *end = vals; 98 struct input_value *v; 99 100 if (handler->filter) { 101 for (v = vals; v != vals + count; v++) { 102 if (handler->filter(handle, v->type, v->code, v->value)) 103 continue; 104 if (end != v) 105 *end = *v; 106 end++; 107 } 108 count = end - vals; 109 } 110 111 if (!count) 112 return 0; 113 114 if (handler->events) 115 handler->events(handle, vals, count); 116 else if (handler->event) 117 for (v = vals; v != vals + count; v++) 118 handler->event(handle, v->type, v->code, v->value); 119 120 return count; 121 } 122 123 /* 124 * Pass values first through all filters and then, if event has not been 125 * filtered out, through all open handles. This function is called with 126 * dev->event_lock held and interrupts disabled. 
127 */ 128 static void input_pass_values(struct input_dev *dev, 129 struct input_value *vals, unsigned int count) 130 { 131 struct input_handle *handle; 132 struct input_value *v; 133 134 if (!count) 135 return; 136 137 rcu_read_lock(); 138 139 handle = rcu_dereference(dev->grab); 140 if (handle) { 141 count = input_to_handler(handle, vals, count); 142 } else { 143 list_for_each_entry_rcu(handle, &dev->h_list, d_node) 144 if (handle->open) { 145 count = input_to_handler(handle, vals, count); 146 if (!count) 147 break; 148 } 149 } 150 151 rcu_read_unlock(); 152 153 /* trigger auto repeat for key events */ 154 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { 155 for (v = vals; v != vals + count; v++) { 156 if (v->type == EV_KEY && v->value != 2) { 157 if (v->value) 158 input_start_autorepeat(dev, v->code); 159 else 160 input_stop_autorepeat(dev); 161 } 162 } 163 } 164 } 165 166 static void input_pass_event(struct input_dev *dev, 167 unsigned int type, unsigned int code, int value) 168 { 169 struct input_value vals[] = { { type, code, value } }; 170 171 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 172 } 173 174 /* 175 * Generate software autorepeat event. Note that we take 176 * dev->event_lock here to avoid racing with input_event 177 * which may cause keys get "stuck". 178 */ 179 static void input_repeat_key(struct timer_list *t) 180 { 181 struct input_dev *dev = from_timer(dev, t, timer); 182 unsigned long flags; 183 184 spin_lock_irqsave(&dev->event_lock, flags); 185 186 if (test_bit(dev->repeat_key, dev->key) && 187 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 188 struct input_value vals[] = { 189 { EV_KEY, dev->repeat_key, 2 }, 190 input_value_sync 191 }; 192 193 input_set_timestamp(dev, ktime_get()); 194 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 195 196 if (dev->rep[REP_PERIOD]) 197 mod_timer(&dev->timer, jiffies + 198 msecs_to_jiffies(dev->rep[REP_PERIOD])); 199 } 200 201 spin_unlock_irqrestore(&dev->event_lock, flags); 202 } 203 204 #define INPUT_IGNORE_EVENT 0 205 #define INPUT_PASS_TO_HANDLERS 1 206 #define INPUT_PASS_TO_DEVICE 2 207 #define INPUT_SLOT 4 208 #define INPUT_FLUSH 8 209 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 210 211 static int input_handle_abs_event(struct input_dev *dev, 212 unsigned int code, int *pval) 213 { 214 struct input_mt *mt = dev->mt; 215 bool is_mt_event; 216 int *pold; 217 218 if (code == ABS_MT_SLOT) { 219 /* 220 * "Stage" the event; we'll flush it later, when we 221 * get actual touch data. 222 */ 223 if (mt && *pval >= 0 && *pval < mt->num_slots) 224 mt->slot = *pval; 225 226 return INPUT_IGNORE_EVENT; 227 } 228 229 is_mt_event = input_is_mt_value(code); 230 231 if (!is_mt_event) { 232 pold = &dev->absinfo[code].value; 233 } else if (mt) { 234 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST]; 235 } else { 236 /* 237 * Bypass filtering for multi-touch events when 238 * not employing slots. 
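		 * Without slots (e.g. multi-touch protocol "A") there is
		 * no stored per-contact state to compare against, so
		 * defuzzing cannot be applied.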
239 */ 240 pold = NULL; 241 } 242 243 if (pold) { 244 *pval = input_defuzz_abs_event(*pval, *pold, 245 dev->absinfo[code].fuzz); 246 if (*pold == *pval) 247 return INPUT_IGNORE_EVENT; 248 249 *pold = *pval; 250 } 251 252 /* Flush pending "slot" event */ 253 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 254 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot); 255 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT; 256 } 257 258 return INPUT_PASS_TO_HANDLERS; 259 } 260 261 static int input_get_disposition(struct input_dev *dev, 262 unsigned int type, unsigned int code, int *pval) 263 { 264 int disposition = INPUT_IGNORE_EVENT; 265 int value = *pval; 266 267 switch (type) { 268 269 case EV_SYN: 270 switch (code) { 271 case SYN_CONFIG: 272 disposition = INPUT_PASS_TO_ALL; 273 break; 274 275 case SYN_REPORT: 276 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH; 277 break; 278 case SYN_MT_REPORT: 279 disposition = INPUT_PASS_TO_HANDLERS; 280 break; 281 } 282 break; 283 284 case EV_KEY: 285 if (is_event_supported(code, dev->keybit, KEY_MAX)) { 286 287 /* auto-repeat bypasses state updates */ 288 if (value == 2) { 289 disposition = INPUT_PASS_TO_HANDLERS; 290 break; 291 } 292 293 if (!!test_bit(code, dev->key) != !!value) { 294 295 __change_bit(code, dev->key); 296 disposition = INPUT_PASS_TO_HANDLERS; 297 } 298 } 299 break; 300 301 case EV_SW: 302 if (is_event_supported(code, dev->swbit, SW_MAX) && 303 !!test_bit(code, dev->sw) != !!value) { 304 305 __change_bit(code, dev->sw); 306 disposition = INPUT_PASS_TO_HANDLERS; 307 } 308 break; 309 310 case EV_ABS: 311 if (is_event_supported(code, dev->absbit, ABS_MAX)) 312 disposition = input_handle_abs_event(dev, code, &value); 313 314 break; 315 316 case EV_REL: 317 if (is_event_supported(code, dev->relbit, REL_MAX) && value) 318 disposition = INPUT_PASS_TO_HANDLERS; 319 320 break; 321 322 case EV_MSC: 323 if (is_event_supported(code, dev->mscbit, MSC_MAX)) 324 disposition = INPUT_PASS_TO_ALL; 325 326 break; 327 328 case EV_LED: 329 if (is_event_supported(code, dev->ledbit, LED_MAX) && 330 !!test_bit(code, dev->led) != !!value) { 331 332 __change_bit(code, dev->led); 333 disposition = INPUT_PASS_TO_ALL; 334 } 335 break; 336 337 case EV_SND: 338 if (is_event_supported(code, dev->sndbit, SND_MAX)) { 339 340 if (!!test_bit(code, dev->snd) != !!value) 341 __change_bit(code, dev->snd); 342 disposition = INPUT_PASS_TO_ALL; 343 } 344 break; 345 346 case EV_REP: 347 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { 348 dev->rep[code] = value; 349 disposition = INPUT_PASS_TO_ALL; 350 } 351 break; 352 353 case EV_FF: 354 if (value >= 0) 355 disposition = INPUT_PASS_TO_ALL; 356 break; 357 358 case EV_PWR: 359 disposition = INPUT_PASS_TO_ALL; 360 break; 361 } 362 363 *pval = value; 364 return disposition; 365 } 366 367 static void input_handle_event(struct input_dev *dev, 368 unsigned int type, unsigned int code, int value) 369 { 370 int disposition; 371 372 /* filter-out events from inhibited devices */ 373 if (dev->inhibited) 374 return; 375 376 disposition = input_get_disposition(dev, type, code, &value); 377 if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) 378 add_input_randomness(type, code, value); 379 380 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 381 dev->event(dev, type, code, value); 382 383 if (!dev->vals) 384 return; 385 386 if (disposition & INPUT_PASS_TO_HANDLERS) { 387 struct input_value *v; 388 389 if (disposition & INPUT_SLOT) { 390 v = &dev->vals[dev->num_vals++]; 391 v->type = EV_ABS; 392 
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monotonic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after an input device was
 * allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used
 * to 'seed' the initial state of a switch or the initial position of an
 * absolute axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event(), but the event is ignored if the device is
 * "grabbed" and the handle injecting the event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo array the caller asked for is already allocated, this
 * function will not do anything.
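 *
 * Drivers usually do not need to call this directly:
 * input_set_abs_params() below and input_set_capability(dev, EV_ABS, ...)
 * allocate the array on demand.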
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (dev->absinfo)
		return;

	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
	if (!dev->absinfo) {
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate memory\n", __func__);
		/*
		 * We will handle this allocation failure in
		 * input_register_device() when we refuse to register an
		 * input device with ABS bits but without absinfo.
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);
}
EXPORT_SYMBOL(input_set_abs_params);

/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle, all events generated by
 * the device are delivered only to this handle. Events injected by
 * other input handles are also ignored while the device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases a previously grabbed device so that other input handles can
 * start receiving input events. Upon release, all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize the device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
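 *
 * A minimal sketch of a handler's connect() path (assuming @handle has
 * already been allocated and filled in):
 *
 *	error = input_register_handle(handle);
 *	if (!error)
 *		error = input_open_device(handle);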
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (dev->users++ || dev->inhibited) {
		/*
		 * Device is already opened and/or inhibited,
		 * so we can exit immediately and report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle.
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!dev->inhibited && !--dev->users) {
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
		if (dev->close)
			dev->close(dev);
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_event()
		 * completed and that no more input events are delivered
		 * through this handle.
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static void input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_pass_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}

		if (need_sync)
			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);

		memset(dev->key, 0, sizeof(dev->key));
	}
}

/*
 * Prepare device for unregistering.
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device().
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here, but they will not
	 * reach any handlers.
748 */ 749 input_dev_release_keys(dev); 750 751 list_for_each_entry(handle, &dev->h_list, d_node) 752 handle->open = 0; 753 754 spin_unlock_irq(&dev->event_lock); 755 } 756 757 /** 758 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry 759 * @ke: keymap entry containing scancode to be converted. 760 * @scancode: pointer to the location where converted scancode should 761 * be stored. 762 * 763 * This function is used to convert scancode stored in &struct keymap_entry 764 * into scalar form understood by legacy keymap handling methods. These 765 * methods expect scancodes to be represented as 'unsigned int'. 766 */ 767 int input_scancode_to_scalar(const struct input_keymap_entry *ke, 768 unsigned int *scancode) 769 { 770 switch (ke->len) { 771 case 1: 772 *scancode = *((u8 *)ke->scancode); 773 break; 774 775 case 2: 776 *scancode = *((u16 *)ke->scancode); 777 break; 778 779 case 4: 780 *scancode = *((u32 *)ke->scancode); 781 break; 782 783 default: 784 return -EINVAL; 785 } 786 787 return 0; 788 } 789 EXPORT_SYMBOL(input_scancode_to_scalar); 790 791 /* 792 * Those routines handle the default case where no [gs]etkeycode() is 793 * defined. In this case, an array indexed by the scancode is used. 794 */ 795 796 static unsigned int input_fetch_keycode(struct input_dev *dev, 797 unsigned int index) 798 { 799 switch (dev->keycodesize) { 800 case 1: 801 return ((u8 *)dev->keycode)[index]; 802 803 case 2: 804 return ((u16 *)dev->keycode)[index]; 805 806 default: 807 return ((u32 *)dev->keycode)[index]; 808 } 809 } 810 811 static int input_default_getkeycode(struct input_dev *dev, 812 struct input_keymap_entry *ke) 813 { 814 unsigned int index; 815 int error; 816 817 if (!dev->keycodesize) 818 return -EINVAL; 819 820 if (ke->flags & INPUT_KEYMAP_BY_INDEX) 821 index = ke->index; 822 else { 823 error = input_scancode_to_scalar(ke, &index); 824 if (error) 825 return error; 826 } 827 828 if (index >= dev->keycodemax) 829 return -EINVAL; 830 831 ke->keycode = input_fetch_keycode(dev, index); 832 ke->index = index; 833 ke->len = sizeof(index); 834 memcpy(ke->scancode, &index, sizeof(index)); 835 836 return 0; 837 } 838 839 static int input_default_setkeycode(struct input_dev *dev, 840 const struct input_keymap_entry *ke, 841 unsigned int *old_keycode) 842 { 843 unsigned int index; 844 int error; 845 int i; 846 847 if (!dev->keycodesize) 848 return -EINVAL; 849 850 if (ke->flags & INPUT_KEYMAP_BY_INDEX) { 851 index = ke->index; 852 } else { 853 error = input_scancode_to_scalar(ke, &index); 854 if (error) 855 return error; 856 } 857 858 if (index >= dev->keycodemax) 859 return -EINVAL; 860 861 if (dev->keycodesize < sizeof(ke->keycode) && 862 (ke->keycode >> (dev->keycodesize * 8))) 863 return -EINVAL; 864 865 switch (dev->keycodesize) { 866 case 1: { 867 u8 *k = (u8 *)dev->keycode; 868 *old_keycode = k[index]; 869 k[index] = ke->keycode; 870 break; 871 } 872 case 2: { 873 u16 *k = (u16 *)dev->keycode; 874 *old_keycode = k[index]; 875 k[index] = ke->keycode; 876 break; 877 } 878 default: { 879 u32 *k = (u32 *)dev->keycode; 880 *old_keycode = k[index]; 881 k[index] = ke->keycode; 882 break; 883 } 884 } 885 886 if (*old_keycode <= KEY_MAX) { 887 __clear_bit(*old_keycode, dev->keybit); 888 for (i = 0; i < dev->keycodemax; i++) { 889 if (input_fetch_keycode(dev, i) == *old_keycode) { 890 __set_bit(*old_keycode, dev->keybit); 891 /* Setting the bit twice is useless, so break */ 892 break; 893 } 894 } 895 } 896 897 __set_bit(ke->keycode, dev->keybit); 898 return 0; 899 } 900 901 /** 
902 * input_get_keycode - retrieve keycode currently mapped to a given scancode 903 * @dev: input device which keymap is being queried 904 * @ke: keymap entry 905 * 906 * This function should be called by anyone interested in retrieving current 907 * keymap. Presently evdev handlers use it. 908 */ 909 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 910 { 911 unsigned long flags; 912 int retval; 913 914 spin_lock_irqsave(&dev->event_lock, flags); 915 retval = dev->getkeycode(dev, ke); 916 spin_unlock_irqrestore(&dev->event_lock, flags); 917 918 return retval; 919 } 920 EXPORT_SYMBOL(input_get_keycode); 921 922 /** 923 * input_set_keycode - attribute a keycode to a given scancode 924 * @dev: input device which keymap is being updated 925 * @ke: new keymap entry 926 * 927 * This function should be called by anyone needing to update current 928 * keymap. Presently keyboard and evdev handlers use it. 929 */ 930 int input_set_keycode(struct input_dev *dev, 931 const struct input_keymap_entry *ke) 932 { 933 unsigned long flags; 934 unsigned int old_keycode; 935 int retval; 936 937 if (ke->keycode > KEY_MAX) 938 return -EINVAL; 939 940 spin_lock_irqsave(&dev->event_lock, flags); 941 942 retval = dev->setkeycode(dev, ke, &old_keycode); 943 if (retval) 944 goto out; 945 946 /* Make sure KEY_RESERVED did not get enabled. */ 947 __clear_bit(KEY_RESERVED, dev->keybit); 948 949 /* 950 * Simulate keyup event if keycode is not present 951 * in the keymap anymore 952 */ 953 if (old_keycode > KEY_MAX) { 954 dev_warn(dev->dev.parent ?: &dev->dev, 955 "%s: got too big old keycode %#x\n", 956 __func__, old_keycode); 957 } else if (test_bit(EV_KEY, dev->evbit) && 958 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 959 __test_and_clear_bit(old_keycode, dev->key)) { 960 struct input_value vals[] = { 961 { EV_KEY, old_keycode, 0 }, 962 input_value_sync 963 }; 964 965 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 966 } 967 968 out: 969 spin_unlock_irqrestore(&dev->event_lock, flags); 970 971 return retval; 972 } 973 EXPORT_SYMBOL(input_set_keycode); 974 975 bool input_match_device_id(const struct input_dev *dev, 976 const struct input_device_id *id) 977 { 978 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 979 if (id->bustype != dev->id.bustype) 980 return false; 981 982 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) 983 if (id->vendor != dev->id.vendor) 984 return false; 985 986 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) 987 if (id->product != dev->id.product) 988 return false; 989 990 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) 991 if (id->version != dev->id.version) 992 return false; 993 994 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || 995 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || 996 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || 997 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || 998 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || 999 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || 1000 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || 1001 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || 1002 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || 1003 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { 1004 return false; 1005 } 1006 1007 return true; 1008 } 1009 EXPORT_SYMBOL(input_match_device_id); 1010 1011 static const struct input_device_id *input_match_device(struct input_handler *handler, 1012 struct input_dev *dev) 1013 { 1014 const struct input_device_id *id; 1015 1016 for (id = handler->id_table; id->flags || 
id->driver_info; id++) { 1017 if (input_match_device_id(dev, id) && 1018 (!handler->match || handler->match(handler, dev))) { 1019 return id; 1020 } 1021 } 1022 1023 return NULL; 1024 } 1025 1026 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) 1027 { 1028 const struct input_device_id *id; 1029 int error; 1030 1031 id = input_match_device(handler, dev); 1032 if (!id) 1033 return -ENODEV; 1034 1035 error = handler->connect(handler, dev, id); 1036 if (error && error != -ENODEV) 1037 pr_err("failed to attach handler %s to device %s, error: %d\n", 1038 handler->name, kobject_name(&dev->dev.kobj), error); 1039 1040 return error; 1041 } 1042 1043 #ifdef CONFIG_COMPAT 1044 1045 static int input_bits_to_string(char *buf, int buf_size, 1046 unsigned long bits, bool skip_empty) 1047 { 1048 int len = 0; 1049 1050 if (in_compat_syscall()) { 1051 u32 dword = bits >> 32; 1052 if (dword || !skip_empty) 1053 len += snprintf(buf, buf_size, "%x ", dword); 1054 1055 dword = bits & 0xffffffffUL; 1056 if (dword || !skip_empty || len) 1057 len += snprintf(buf + len, max(buf_size - len, 0), 1058 "%x", dword); 1059 } else { 1060 if (bits || !skip_empty) 1061 len += snprintf(buf, buf_size, "%lx", bits); 1062 } 1063 1064 return len; 1065 } 1066 1067 #else /* !CONFIG_COMPAT */ 1068 1069 static int input_bits_to_string(char *buf, int buf_size, 1070 unsigned long bits, bool skip_empty) 1071 { 1072 return bits || !skip_empty ? 1073 snprintf(buf, buf_size, "%lx", bits) : 0; 1074 } 1075 1076 #endif 1077 1078 #ifdef CONFIG_PROC_FS 1079 1080 static struct proc_dir_entry *proc_bus_input_dir; 1081 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); 1082 static int input_devices_state; 1083 1084 static inline void input_wakeup_procfs_readers(void) 1085 { 1086 input_devices_state++; 1087 wake_up(&input_devices_poll_wait); 1088 } 1089 1090 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) 1091 { 1092 poll_wait(file, &input_devices_poll_wait, wait); 1093 if (file->f_version != input_devices_state) { 1094 file->f_version = input_devices_state; 1095 return EPOLLIN | EPOLLRDNORM; 1096 } 1097 1098 return 0; 1099 } 1100 1101 union input_seq_state { 1102 struct { 1103 unsigned short pos; 1104 bool mutex_acquired; 1105 }; 1106 void *p; 1107 }; 1108 1109 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) 1110 { 1111 union input_seq_state *state = (union input_seq_state *)&seq->private; 1112 int error; 1113 1114 /* We need to fit into seq->private pointer */ 1115 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1116 1117 error = mutex_lock_interruptible(&input_mutex); 1118 if (error) { 1119 state->mutex_acquired = false; 1120 return ERR_PTR(error); 1121 } 1122 1123 state->mutex_acquired = true; 1124 1125 return seq_list_start(&input_dev_list, *pos); 1126 } 1127 1128 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1129 { 1130 return seq_list_next(v, &input_dev_list, pos); 1131 } 1132 1133 static void input_seq_stop(struct seq_file *seq, void *v) 1134 { 1135 union input_seq_state *state = (union input_seq_state *)&seq->private; 1136 1137 if (state->mutex_acquired) 1138 mutex_unlock(&input_mutex); 1139 } 1140 1141 static void input_seq_print_bitmap(struct seq_file *seq, const char *name, 1142 unsigned long *bitmap, int max) 1143 { 1144 int i; 1145 bool skip_empty = true; 1146 char buf[18]; 1147 1148 seq_printf(seq, "B: %s=", name); 1149 1150 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1151 if 
(input_bits_to_string(buf, sizeof(buf), 1152 bitmap[i], skip_empty)) { 1153 skip_empty = false; 1154 seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); 1155 } 1156 } 1157 1158 /* 1159 * If no output was produced print a single 0. 1160 */ 1161 if (skip_empty) 1162 seq_putc(seq, '0'); 1163 1164 seq_putc(seq, '\n'); 1165 } 1166 1167 static int input_devices_seq_show(struct seq_file *seq, void *v) 1168 { 1169 struct input_dev *dev = container_of(v, struct input_dev, node); 1170 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 1171 struct input_handle *handle; 1172 1173 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", 1174 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); 1175 1176 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); 1177 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); 1178 seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); 1179 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); 1180 seq_puts(seq, "H: Handlers="); 1181 1182 list_for_each_entry(handle, &dev->h_list, d_node) 1183 seq_printf(seq, "%s ", handle->name); 1184 seq_putc(seq, '\n'); 1185 1186 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX); 1187 1188 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); 1189 if (test_bit(EV_KEY, dev->evbit)) 1190 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); 1191 if (test_bit(EV_REL, dev->evbit)) 1192 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); 1193 if (test_bit(EV_ABS, dev->evbit)) 1194 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); 1195 if (test_bit(EV_MSC, dev->evbit)) 1196 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); 1197 if (test_bit(EV_LED, dev->evbit)) 1198 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); 1199 if (test_bit(EV_SND, dev->evbit)) 1200 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); 1201 if (test_bit(EV_FF, dev->evbit)) 1202 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); 1203 if (test_bit(EV_SW, dev->evbit)) 1204 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); 1205 1206 seq_putc(seq, '\n'); 1207 1208 kfree(path); 1209 return 0; 1210 } 1211 1212 static const struct seq_operations input_devices_seq_ops = { 1213 .start = input_devices_seq_start, 1214 .next = input_devices_seq_next, 1215 .stop = input_seq_stop, 1216 .show = input_devices_seq_show, 1217 }; 1218 1219 static int input_proc_devices_open(struct inode *inode, struct file *file) 1220 { 1221 return seq_open(file, &input_devices_seq_ops); 1222 } 1223 1224 static const struct proc_ops input_devices_proc_ops = { 1225 .proc_open = input_proc_devices_open, 1226 .proc_poll = input_proc_devices_poll, 1227 .proc_read = seq_read, 1228 .proc_lseek = seq_lseek, 1229 .proc_release = seq_release, 1230 }; 1231 1232 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) 1233 { 1234 union input_seq_state *state = (union input_seq_state *)&seq->private; 1235 int error; 1236 1237 /* We need to fit into seq->private pointer */ 1238 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1239 1240 error = mutex_lock_interruptible(&input_mutex); 1241 if (error) { 1242 state->mutex_acquired = false; 1243 return ERR_PTR(error); 1244 } 1245 1246 state->mutex_acquired = true; 1247 state->pos = *pos; 1248 1249 return seq_list_start(&input_handler_list, *pos); 1250 } 1251 1252 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1253 { 1254 union input_seq_state *state = (union 
input_seq_state *)&seq->private; 1255 1256 state->pos = *pos + 1; 1257 return seq_list_next(v, &input_handler_list, pos); 1258 } 1259 1260 static int input_handlers_seq_show(struct seq_file *seq, void *v) 1261 { 1262 struct input_handler *handler = container_of(v, struct input_handler, node); 1263 union input_seq_state *state = (union input_seq_state *)&seq->private; 1264 1265 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); 1266 if (handler->filter) 1267 seq_puts(seq, " (filter)"); 1268 if (handler->legacy_minors) 1269 seq_printf(seq, " Minor=%d", handler->minor); 1270 seq_putc(seq, '\n'); 1271 1272 return 0; 1273 } 1274 1275 static const struct seq_operations input_handlers_seq_ops = { 1276 .start = input_handlers_seq_start, 1277 .next = input_handlers_seq_next, 1278 .stop = input_seq_stop, 1279 .show = input_handlers_seq_show, 1280 }; 1281 1282 static int input_proc_handlers_open(struct inode *inode, struct file *file) 1283 { 1284 return seq_open(file, &input_handlers_seq_ops); 1285 } 1286 1287 static const struct proc_ops input_handlers_proc_ops = { 1288 .proc_open = input_proc_handlers_open, 1289 .proc_read = seq_read, 1290 .proc_lseek = seq_lseek, 1291 .proc_release = seq_release, 1292 }; 1293 1294 static int __init input_proc_init(void) 1295 { 1296 struct proc_dir_entry *entry; 1297 1298 proc_bus_input_dir = proc_mkdir("bus/input", NULL); 1299 if (!proc_bus_input_dir) 1300 return -ENOMEM; 1301 1302 entry = proc_create("devices", 0, proc_bus_input_dir, 1303 &input_devices_proc_ops); 1304 if (!entry) 1305 goto fail1; 1306 1307 entry = proc_create("handlers", 0, proc_bus_input_dir, 1308 &input_handlers_proc_ops); 1309 if (!entry) 1310 goto fail2; 1311 1312 return 0; 1313 1314 fail2: remove_proc_entry("devices", proc_bus_input_dir); 1315 fail1: remove_proc_entry("bus/input", NULL); 1316 return -ENOMEM; 1317 } 1318 1319 static void input_proc_exit(void) 1320 { 1321 remove_proc_entry("devices", proc_bus_input_dir); 1322 remove_proc_entry("handlers", proc_bus_input_dir); 1323 remove_proc_entry("bus/input", NULL); 1324 } 1325 1326 #else /* !CONFIG_PROC_FS */ 1327 static inline void input_wakeup_procfs_readers(void) { } 1328 static inline int input_proc_init(void) { return 0; } 1329 static inline void input_proc_exit(void) { } 1330 #endif 1331 1332 #define INPUT_DEV_STRING_ATTR_SHOW(name) \ 1333 static ssize_t input_dev_show_##name(struct device *dev, \ 1334 struct device_attribute *attr, \ 1335 char *buf) \ 1336 { \ 1337 struct input_dev *input_dev = to_input_dev(dev); \ 1338 \ 1339 return scnprintf(buf, PAGE_SIZE, "%s\n", \ 1340 input_dev->name ? 
input_dev->name : ""); \ 1341 } \ 1342 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) 1343 1344 INPUT_DEV_STRING_ATTR_SHOW(name); 1345 INPUT_DEV_STRING_ATTR_SHOW(phys); 1346 INPUT_DEV_STRING_ATTR_SHOW(uniq); 1347 1348 static int input_print_modalias_bits(char *buf, int size, 1349 char name, unsigned long *bm, 1350 unsigned int min_bit, unsigned int max_bit) 1351 { 1352 int len = 0, i; 1353 1354 len += snprintf(buf, max(size, 0), "%c", name); 1355 for (i = min_bit; i < max_bit; i++) 1356 if (bm[BIT_WORD(i)] & BIT_MASK(i)) 1357 len += snprintf(buf + len, max(size - len, 0), "%X,", i); 1358 return len; 1359 } 1360 1361 static int input_print_modalias(char *buf, int size, struct input_dev *id, 1362 int add_cr) 1363 { 1364 int len; 1365 1366 len = snprintf(buf, max(size, 0), 1367 "input:b%04Xv%04Xp%04Xe%04X-", 1368 id->id.bustype, id->id.vendor, 1369 id->id.product, id->id.version); 1370 1371 len += input_print_modalias_bits(buf + len, size - len, 1372 'e', id->evbit, 0, EV_MAX); 1373 len += input_print_modalias_bits(buf + len, size - len, 1374 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); 1375 len += input_print_modalias_bits(buf + len, size - len, 1376 'r', id->relbit, 0, REL_MAX); 1377 len += input_print_modalias_bits(buf + len, size - len, 1378 'a', id->absbit, 0, ABS_MAX); 1379 len += input_print_modalias_bits(buf + len, size - len, 1380 'm', id->mscbit, 0, MSC_MAX); 1381 len += input_print_modalias_bits(buf + len, size - len, 1382 'l', id->ledbit, 0, LED_MAX); 1383 len += input_print_modalias_bits(buf + len, size - len, 1384 's', id->sndbit, 0, SND_MAX); 1385 len += input_print_modalias_bits(buf + len, size - len, 1386 'f', id->ffbit, 0, FF_MAX); 1387 len += input_print_modalias_bits(buf + len, size - len, 1388 'w', id->swbit, 0, SW_MAX); 1389 1390 if (add_cr) 1391 len += snprintf(buf + len, max(size - len, 0), "\n"); 1392 1393 return len; 1394 } 1395 1396 static ssize_t input_dev_show_modalias(struct device *dev, 1397 struct device_attribute *attr, 1398 char *buf) 1399 { 1400 struct input_dev *id = to_input_dev(dev); 1401 ssize_t len; 1402 1403 len = input_print_modalias(buf, PAGE_SIZE, id, 1); 1404 1405 return min_t(int, len, PAGE_SIZE); 1406 } 1407 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 1408 1409 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1410 int max, int add_cr); 1411 1412 static ssize_t input_dev_show_properties(struct device *dev, 1413 struct device_attribute *attr, 1414 char *buf) 1415 { 1416 struct input_dev *input_dev = to_input_dev(dev); 1417 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit, 1418 INPUT_PROP_MAX, true); 1419 return min_t(int, len, PAGE_SIZE); 1420 } 1421 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL); 1422 1423 static int input_inhibit_device(struct input_dev *dev); 1424 static int input_uninhibit_device(struct input_dev *dev); 1425 1426 static ssize_t inhibited_show(struct device *dev, 1427 struct device_attribute *attr, 1428 char *buf) 1429 { 1430 struct input_dev *input_dev = to_input_dev(dev); 1431 1432 return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited); 1433 } 1434 1435 static ssize_t inhibited_store(struct device *dev, 1436 struct device_attribute *attr, const char *buf, 1437 size_t len) 1438 { 1439 struct input_dev *input_dev = to_input_dev(dev); 1440 ssize_t rv; 1441 bool inhibited; 1442 1443 if (strtobool(buf, &inhibited)) 1444 return -EINVAL; 1445 1446 if (inhibited) 1447 rv = input_inhibit_device(input_dev); 1448 
else 1449 rv = input_uninhibit_device(input_dev); 1450 1451 if (rv != 0) 1452 return rv; 1453 1454 return len; 1455 } 1456 1457 static DEVICE_ATTR_RW(inhibited); 1458 1459 static struct attribute *input_dev_attrs[] = { 1460 &dev_attr_name.attr, 1461 &dev_attr_phys.attr, 1462 &dev_attr_uniq.attr, 1463 &dev_attr_modalias.attr, 1464 &dev_attr_properties.attr, 1465 &dev_attr_inhibited.attr, 1466 NULL 1467 }; 1468 1469 static const struct attribute_group input_dev_attr_group = { 1470 .attrs = input_dev_attrs, 1471 }; 1472 1473 #define INPUT_DEV_ID_ATTR(name) \ 1474 static ssize_t input_dev_show_id_##name(struct device *dev, \ 1475 struct device_attribute *attr, \ 1476 char *buf) \ 1477 { \ 1478 struct input_dev *input_dev = to_input_dev(dev); \ 1479 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \ 1480 } \ 1481 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) 1482 1483 INPUT_DEV_ID_ATTR(bustype); 1484 INPUT_DEV_ID_ATTR(vendor); 1485 INPUT_DEV_ID_ATTR(product); 1486 INPUT_DEV_ID_ATTR(version); 1487 1488 static struct attribute *input_dev_id_attrs[] = { 1489 &dev_attr_bustype.attr, 1490 &dev_attr_vendor.attr, 1491 &dev_attr_product.attr, 1492 &dev_attr_version.attr, 1493 NULL 1494 }; 1495 1496 static const struct attribute_group input_dev_id_attr_group = { 1497 .name = "id", 1498 .attrs = input_dev_id_attrs, 1499 }; 1500 1501 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1502 int max, int add_cr) 1503 { 1504 int i; 1505 int len = 0; 1506 bool skip_empty = true; 1507 1508 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1509 len += input_bits_to_string(buf + len, max(buf_size - len, 0), 1510 bitmap[i], skip_empty); 1511 if (len) { 1512 skip_empty = false; 1513 if (i > 0) 1514 len += snprintf(buf + len, max(buf_size - len, 0), " "); 1515 } 1516 } 1517 1518 /* 1519 * If no output was produced print a single 0. 
1520 */ 1521 if (len == 0) 1522 len = snprintf(buf, buf_size, "%d", 0); 1523 1524 if (add_cr) 1525 len += snprintf(buf + len, max(buf_size - len, 0), "\n"); 1526 1527 return len; 1528 } 1529 1530 #define INPUT_DEV_CAP_ATTR(ev, bm) \ 1531 static ssize_t input_dev_show_cap_##bm(struct device *dev, \ 1532 struct device_attribute *attr, \ 1533 char *buf) \ 1534 { \ 1535 struct input_dev *input_dev = to_input_dev(dev); \ 1536 int len = input_print_bitmap(buf, PAGE_SIZE, \ 1537 input_dev->bm##bit, ev##_MAX, \ 1538 true); \ 1539 return min_t(int, len, PAGE_SIZE); \ 1540 } \ 1541 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) 1542 1543 INPUT_DEV_CAP_ATTR(EV, ev); 1544 INPUT_DEV_CAP_ATTR(KEY, key); 1545 INPUT_DEV_CAP_ATTR(REL, rel); 1546 INPUT_DEV_CAP_ATTR(ABS, abs); 1547 INPUT_DEV_CAP_ATTR(MSC, msc); 1548 INPUT_DEV_CAP_ATTR(LED, led); 1549 INPUT_DEV_CAP_ATTR(SND, snd); 1550 INPUT_DEV_CAP_ATTR(FF, ff); 1551 INPUT_DEV_CAP_ATTR(SW, sw); 1552 1553 static struct attribute *input_dev_caps_attrs[] = { 1554 &dev_attr_ev.attr, 1555 &dev_attr_key.attr, 1556 &dev_attr_rel.attr, 1557 &dev_attr_abs.attr, 1558 &dev_attr_msc.attr, 1559 &dev_attr_led.attr, 1560 &dev_attr_snd.attr, 1561 &dev_attr_ff.attr, 1562 &dev_attr_sw.attr, 1563 NULL 1564 }; 1565 1566 static const struct attribute_group input_dev_caps_attr_group = { 1567 .name = "capabilities", 1568 .attrs = input_dev_caps_attrs, 1569 }; 1570 1571 static const struct attribute_group *input_dev_attr_groups[] = { 1572 &input_dev_attr_group, 1573 &input_dev_id_attr_group, 1574 &input_dev_caps_attr_group, 1575 &input_poller_attribute_group, 1576 NULL 1577 }; 1578 1579 static void input_dev_release(struct device *device) 1580 { 1581 struct input_dev *dev = to_input_dev(device); 1582 1583 input_ff_destroy(dev); 1584 input_mt_destroy_slots(dev); 1585 kfree(dev->poller); 1586 kfree(dev->absinfo); 1587 kfree(dev->vals); 1588 kfree(dev); 1589 1590 module_put(THIS_MODULE); 1591 } 1592 1593 /* 1594 * Input uevent interface - loading event handlers based on 1595 * device bitfields. 1596 */ 1597 static int input_add_uevent_bm_var(struct kobj_uevent_env *env, 1598 const char *name, unsigned long *bitmap, int max) 1599 { 1600 int len; 1601 1602 if (add_uevent_var(env, "%s", name)) 1603 return -ENOMEM; 1604 1605 len = input_print_bitmap(&env->buf[env->buflen - 1], 1606 sizeof(env->buf) - env->buflen, 1607 bitmap, max, false); 1608 if (len >= (sizeof(env->buf) - env->buflen)) 1609 return -ENOMEM; 1610 1611 env->buflen += len; 1612 return 0; 1613 } 1614 1615 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, 1616 struct input_dev *dev) 1617 { 1618 int len; 1619 1620 if (add_uevent_var(env, "MODALIAS=")) 1621 return -ENOMEM; 1622 1623 len = input_print_modalias(&env->buf[env->buflen - 1], 1624 sizeof(env->buf) - env->buflen, 1625 dev, 0); 1626 if (len >= (sizeof(env->buf) - env->buflen)) 1627 return -ENOMEM; 1628 1629 env->buflen += len; 1630 return 0; 1631 } 1632 1633 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) 
\ 1634 do { \ 1635 int err = add_uevent_var(env, fmt, val); \ 1636 if (err) \ 1637 return err; \ 1638 } while (0) 1639 1640 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ 1641 do { \ 1642 int err = input_add_uevent_bm_var(env, name, bm, max); \ 1643 if (err) \ 1644 return err; \ 1645 } while (0) 1646 1647 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ 1648 do { \ 1649 int err = input_add_uevent_modalias_var(env, dev); \ 1650 if (err) \ 1651 return err; \ 1652 } while (0) 1653 1654 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) 1655 { 1656 struct input_dev *dev = to_input_dev(device); 1657 1658 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 1659 dev->id.bustype, dev->id.vendor, 1660 dev->id.product, dev->id.version); 1661 if (dev->name) 1662 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); 1663 if (dev->phys) 1664 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); 1665 if (dev->uniq) 1666 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); 1667 1668 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX); 1669 1670 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); 1671 if (test_bit(EV_KEY, dev->evbit)) 1672 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); 1673 if (test_bit(EV_REL, dev->evbit)) 1674 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); 1675 if (test_bit(EV_ABS, dev->evbit)) 1676 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); 1677 if (test_bit(EV_MSC, dev->evbit)) 1678 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); 1679 if (test_bit(EV_LED, dev->evbit)) 1680 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); 1681 if (test_bit(EV_SND, dev->evbit)) 1682 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); 1683 if (test_bit(EV_FF, dev->evbit)) 1684 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); 1685 if (test_bit(EV_SW, dev->evbit)) 1686 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); 1687 1688 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); 1689 1690 return 0; 1691 } 1692 1693 #define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1694 do { \ 1695 int i; \ 1696 bool active; \ 1697 \ 1698 if (!test_bit(EV_##type, dev->evbit)) \ 1699 break; \ 1700 \ 1701 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \ 1702 active = test_bit(i, dev->bits); \ 1703 if (!active && !on) \ 1704 continue; \ 1705 \ 1706 dev->event(dev, EV_##type, i, on ? active : 0); \ 1707 } \ 1708 } while (0) 1709 1710 static void input_dev_toggle(struct input_dev *dev, bool activate) 1711 { 1712 if (!dev->event) 1713 return; 1714 1715 INPUT_DO_TOGGLE(dev, LED, led, activate); 1716 INPUT_DO_TOGGLE(dev, SND, snd, activate); 1717 1718 if (activate && test_bit(EV_REP, dev->evbit)) { 1719 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); 1720 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); 1721 } 1722 } 1723 1724 /** 1725 * input_reset_device() - reset/restore the state of input device 1726 * @dev: input device whose state needs to be reset 1727 * 1728 * This function tries to reset the state of an opened input device and 1729 * bring internal state and state if the hardware in sync with each other. 1730 * We mark all keys as released, restore LED state, repeat rate, etc. 
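 * A driver would typically call this after it has re-initialized the
 * hardware (for example on resume from a firmware reset), so that the
 * input core's view of the device matches the hardware again.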
1731 */ 1732 void input_reset_device(struct input_dev *dev) 1733 { 1734 unsigned long flags; 1735 1736 mutex_lock(&dev->mutex); 1737 spin_lock_irqsave(&dev->event_lock, flags); 1738 1739 input_dev_toggle(dev, true); 1740 input_dev_release_keys(dev); 1741 1742 spin_unlock_irqrestore(&dev->event_lock, flags); 1743 mutex_unlock(&dev->mutex); 1744 } 1745 EXPORT_SYMBOL(input_reset_device); 1746 1747 static int input_inhibit_device(struct input_dev *dev) 1748 { 1749 int ret = 0; 1750 1751 mutex_lock(&dev->mutex); 1752 1753 if (dev->inhibited) 1754 goto out; 1755 1756 if (dev->users) { 1757 if (dev->close) 1758 dev->close(dev); 1759 if (dev->poller) 1760 input_dev_poller_stop(dev->poller); 1761 } 1762 1763 spin_lock_irq(&dev->event_lock); 1764 input_dev_release_keys(dev); 1765 input_dev_toggle(dev, false); 1766 spin_unlock_irq(&dev->event_lock); 1767 1768 dev->inhibited = true; 1769 1770 out: 1771 mutex_unlock(&dev->mutex); 1772 return ret; 1773 } 1774 1775 static int input_uninhibit_device(struct input_dev *dev) 1776 { 1777 int ret = 0; 1778 1779 mutex_lock(&dev->mutex); 1780 1781 if (!dev->inhibited) 1782 goto out; 1783 1784 if (dev->users) { 1785 if (dev->open) { 1786 ret = dev->open(dev); 1787 if (ret) 1788 goto out; 1789 } 1790 if (dev->poller) 1791 input_dev_poller_start(dev->poller); 1792 } 1793 1794 dev->inhibited = false; 1795 spin_lock_irq(&dev->event_lock); 1796 input_dev_toggle(dev, true); 1797 spin_unlock_irq(&dev->event_lock); 1798 1799 out: 1800 mutex_unlock(&dev->mutex); 1801 return ret; 1802 } 1803 1804 #ifdef CONFIG_PM_SLEEP 1805 static int input_dev_suspend(struct device *dev) 1806 { 1807 struct input_dev *input_dev = to_input_dev(dev); 1808 1809 spin_lock_irq(&input_dev->event_lock); 1810 1811 /* 1812 * Keys that are pressed now are unlikely to be 1813 * still pressed when we resume. 1814 */ 1815 input_dev_release_keys(input_dev); 1816 1817 /* Turn off LEDs and sounds, if any are active. */ 1818 input_dev_toggle(input_dev, false); 1819 1820 spin_unlock_irq(&input_dev->event_lock); 1821 1822 return 0; 1823 } 1824 1825 static int input_dev_resume(struct device *dev) 1826 { 1827 struct input_dev *input_dev = to_input_dev(dev); 1828 1829 spin_lock_irq(&input_dev->event_lock); 1830 1831 /* Restore state of LEDs and sounds, if any were active. */ 1832 input_dev_toggle(input_dev, true); 1833 1834 spin_unlock_irq(&input_dev->event_lock); 1835 1836 return 0; 1837 } 1838 1839 static int input_dev_freeze(struct device *dev) 1840 { 1841 struct input_dev *input_dev = to_input_dev(dev); 1842 1843 spin_lock_irq(&input_dev->event_lock); 1844 1845 /* 1846 * Keys that are pressed now are unlikely to be 1847 * still pressed when we resume. 1848 */ 1849 input_dev_release_keys(input_dev); 1850 1851 spin_unlock_irq(&input_dev->event_lock); 1852 1853 return 0; 1854 } 1855 1856 static int input_dev_poweroff(struct device *dev) 1857 { 1858 struct input_dev *input_dev = to_input_dev(dev); 1859 1860 spin_lock_irq(&input_dev->event_lock); 1861 1862 /* Turn off LEDs and sounds, if any are active. 
*/ 1863 input_dev_toggle(input_dev, false); 1864 1865 spin_unlock_irq(&input_dev->event_lock); 1866 1867 return 0; 1868 } 1869 1870 static const struct dev_pm_ops input_dev_pm_ops = { 1871 .suspend = input_dev_suspend, 1872 .resume = input_dev_resume, 1873 .freeze = input_dev_freeze, 1874 .poweroff = input_dev_poweroff, 1875 .restore = input_dev_resume, 1876 }; 1877 #endif /* CONFIG_PM */ 1878 1879 static const struct device_type input_dev_type = { 1880 .groups = input_dev_attr_groups, 1881 .release = input_dev_release, 1882 .uevent = input_dev_uevent, 1883 #ifdef CONFIG_PM_SLEEP 1884 .pm = &input_dev_pm_ops, 1885 #endif 1886 }; 1887 1888 static char *input_devnode(struct device *dev, umode_t *mode) 1889 { 1890 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); 1891 } 1892 1893 struct class input_class = { 1894 .name = "input", 1895 .devnode = input_devnode, 1896 }; 1897 EXPORT_SYMBOL_GPL(input_class); 1898 1899 /** 1900 * input_allocate_device - allocate memory for new input device 1901 * 1902 * Returns prepared struct input_dev or %NULL. 1903 * 1904 * NOTE: Use input_free_device() to free devices that have not been 1905 * registered; input_unregister_device() should be used for already 1906 * registered devices. 1907 */ 1908 struct input_dev *input_allocate_device(void) 1909 { 1910 static atomic_t input_no = ATOMIC_INIT(-1); 1911 struct input_dev *dev; 1912 1913 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1914 if (dev) { 1915 dev->dev.type = &input_dev_type; 1916 dev->dev.class = &input_class; 1917 device_initialize(&dev->dev); 1918 mutex_init(&dev->mutex); 1919 spin_lock_init(&dev->event_lock); 1920 timer_setup(&dev->timer, NULL, 0); 1921 INIT_LIST_HEAD(&dev->h_list); 1922 INIT_LIST_HEAD(&dev->node); 1923 1924 dev_set_name(&dev->dev, "input%lu", 1925 (unsigned long)atomic_inc_return(&input_no)); 1926 1927 __module_get(THIS_MODULE); 1928 } 1929 1930 return dev; 1931 } 1932 EXPORT_SYMBOL(input_allocate_device); 1933 1934 struct input_devres { 1935 struct input_dev *input; 1936 }; 1937 1938 static int devm_input_device_match(struct device *dev, void *res, void *data) 1939 { 1940 struct input_devres *devres = res; 1941 1942 return devres->input == data; 1943 } 1944 1945 static void devm_input_device_release(struct device *dev, void *res) 1946 { 1947 struct input_devres *devres = res; 1948 struct input_dev *input = devres->input; 1949 1950 dev_dbg(dev, "%s: dropping reference to %s\n", 1951 __func__, dev_name(&input->dev)); 1952 input_put_device(input); 1953 } 1954 1955 /** 1956 * devm_input_allocate_device - allocate managed input device 1957 * @dev: device owning the input device being created 1958 * 1959 * Returns prepared struct input_dev or %NULL. 1960 * 1961 * Managed input devices do not need to be explicitly unregistered or 1962 * freed as it will be done automatically when owner device unbinds from 1963 * its driver (or binding fails). Once managed input device is allocated, 1964 * it is ready to be set up and registered in the same fashion as regular 1965 * input device. There are no special devm_input_device_[un]register() 1966 * variants, regular ones work with both managed and unmanaged devices, 1967 * should you need them. In most cases however, managed input device need 1968 * not be explicitly unregistered or freed. 1969 * 1970 * NOTE: the owner device is set up as parent of input device and users 1971 * should not override it. 
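 *
 * A probe() sketch (illustrative only; assumes a platform device @pdev
 * and a single KEY_POWER button):
 *
 *	input = devm_input_allocate_device(&pdev->dev);
 *	if (!input)
 *		return -ENOMEM;
 *	input->name = "example-button";
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *	return input_register_device(input);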
1972 */ 1973 struct input_dev *devm_input_allocate_device(struct device *dev) 1974 { 1975 struct input_dev *input; 1976 struct input_devres *devres; 1977 1978 devres = devres_alloc(devm_input_device_release, 1979 sizeof(*devres), GFP_KERNEL); 1980 if (!devres) 1981 return NULL; 1982 1983 input = input_allocate_device(); 1984 if (!input) { 1985 devres_free(devres); 1986 return NULL; 1987 } 1988 1989 input->dev.parent = dev; 1990 input->devres_managed = true; 1991 1992 devres->input = input; 1993 devres_add(dev, devres); 1994 1995 return input; 1996 } 1997 EXPORT_SYMBOL(devm_input_allocate_device); 1998 1999 /** 2000 * input_free_device - free memory occupied by input_dev structure 2001 * @dev: input device to free 2002 * 2003 * This function should only be used if input_register_device() 2004 * was not called yet or if it failed. Once device was registered 2005 * use input_unregister_device() and memory will be freed once last 2006 * reference to the device is dropped. 2007 * 2008 * Device should be allocated by input_allocate_device(). 2009 * 2010 * NOTE: If there are references to the input device then memory 2011 * will not be freed until last reference is dropped. 2012 */ 2013 void input_free_device(struct input_dev *dev) 2014 { 2015 if (dev) { 2016 if (dev->devres_managed) 2017 WARN_ON(devres_destroy(dev->dev.parent, 2018 devm_input_device_release, 2019 devm_input_device_match, 2020 dev)); 2021 input_put_device(dev); 2022 } 2023 } 2024 EXPORT_SYMBOL(input_free_device); 2025 2026 /** 2027 * input_set_timestamp - set timestamp for input events 2028 * @dev: input device to set timestamp for 2029 * @timestamp: the time at which the event has occurred 2030 * in CLOCK_MONOTONIC 2031 * 2032 * This function is intended to provide to the input system a more 2033 * accurate time of when an event actually occurred. The driver should 2034 * call this function as soon as a timestamp is acquired ensuring 2035 * clock conversions in input_set_timestamp are done correctly. 2036 * 2037 * The system entering suspend state between timestamp acquisition and 2038 * calling input_set_timestamp can result in inaccurate conversions. 2039 */ 2040 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp) 2041 { 2042 dev->timestamp[INPUT_CLK_MONO] = timestamp; 2043 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp); 2044 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp, 2045 TK_OFFS_BOOT); 2046 } 2047 EXPORT_SYMBOL(input_set_timestamp); 2048 2049 /** 2050 * input_get_timestamp - get timestamp for input events 2051 * @dev: input device to get timestamp from 2052 * 2053 * A valid timestamp is a timestamp of non-zero value. 2054 */ 2055 ktime_t *input_get_timestamp(struct input_dev *dev) 2056 { 2057 const ktime_t invalid_timestamp = ktime_set(0, 0); 2058 2059 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp)) 2060 input_set_timestamp(dev, ktime_get()); 2061 2062 return dev->timestamp; 2063 } 2064 EXPORT_SYMBOL(input_get_timestamp); 2065 2066 /** 2067 * input_set_capability - mark device as capable of a certain event 2068 * @dev: device that is capable of emitting or accepting event 2069 * @type: type of the event (EV_KEY, EV_REL, etc...) 2070 * @code: event code 2071 * 2072 * In addition to setting up corresponding bit in appropriate capability 2073 * bitmap the function also adjusts dev->evbit. 
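 *
 * For example, a driver exposing one button and a scroll wheel might
 * declare (sketch):
 *
 *	input_set_capability(dev, EV_KEY, BTN_0);
 *	input_set_capability(dev, EV_REL, REL_WHEEL);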
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		if (!dev->absinfo)
			return;

		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);

static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)			\
	do {							\
		if (!test_bit(EV_##type, dev->evbit))		\
			memset(dev->bits##bit, 0,		\
			       sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers the device with the input core. The device
 * must be allocated with input_allocate_device() and all of its
 * capabilities set up before registering.
 * If this function fails, the device must be freed with
 * input_free_device(). Once the device has been successfully
 * registered it can be unregistered with input_unregister_device();
 * input_free_device() should not be called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed; their teardown
 * is handled by the devres infrastructure. It is also worth noting that
 * teardown of managed input devices is internally a two-step process:
 * the registered managed input device is first unregistered, but stays
 * in memory and can still handle input_event() calls (although events
 * will not be delivered anywhere). The freeing of the managed input
 * device happens later, when the devres stack is unwound to the point
 * where the device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
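	 *
	 * For example, a driver whose hardware autorepeats on its own can
	 * pre-set dev->rep[REP_DELAY] and dev->rep[REP_PERIOD] before
	 * registering; the core then never arms its software repeat timer.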
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once a device has been
 * unregistered the caller should not try to access it, as it may be
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
				       devm_input_device_unregister,
				       devm_input_device_match,
				       dev));
		__input_unregister_device(dev);
		/*
		 * We do not call input_put_device() here because that
		 * will be done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);

/**
 * input_unregister_handler - unregister an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from the list of known handlers.
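 *
 * A handler is typically set up once with input_register_handler() and
 * torn down again with this function. A minimal sketch (illustrative
 * only; the "example" callbacks and the catch-all id table entry are
 * hypothetical):
 *
 *	static const struct input_device_id example_ids[] = {
 *		{ .driver_info = 1 },
 *		{ },
 *	};
 *
 *	static struct input_handler example_handler = {
 *		.event      = example_event,
 *		.connect    = example_connect,
 *		.disconnect = example_disconnect,
 *		.name       = "example",
 *		.id_table   = example_ids,
 *	};
 *
 *	error = input_register_handler(&example_handler);
 *	...
 *	input_unregister_handler(&example_handler);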
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles and call @fn for each,
 * passing it @data; stop when @fn returns a non-zero value. The
 * function uses RCU to traverse the list and therefore may be used in
 * atomic contexts. The @fn callback is invoked from an RCU critical
 * section and thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's and
 * handler's lists so that events can flow through it once it is
 * opened using input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent a race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect(),
	 * which is mutually exclusive with ->disconnect(), we
	 * cannot be racing with input_unregister_handle() and
	 * so a separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's and
 * handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent a race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocate a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of the legacy range
 * @allow_dynamic: whether an ID can also be taken from the dynamic range
 *
 * This function allocates a new device minor from the input major's
 * namespace. The caller can request a legacy minor by specifying
 * @legacy_base and @legacy_num, and use @allow_dynamic to indicate
 * whether an ID may be allocated from the dynamic range if there are
 * no free IDs left in the legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handlers' ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that
 * it can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);
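
/*
 * Illustrative sketch of event reporting with an explicit timestamp,
 * referenced from the input_set_timestamp() documentation above. The
 * "example" names are hypothetical; the interfaces used are
 * input_set_timestamp() from this file and input_report_key() /
 * input_sync() from <linux/input.h>. A driver reading a key state in
 * its interrupt handler would record the timestamp as early as
 * possible and then report the event:
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		input_set_timestamp(priv->input, ktime_get());
 *		input_report_key(priv->input, KEY_ENTER,
 *				 example_read_state(priv));
 *		input_sync(priv->input);
 *
 *		return IRQ_HANDLED;
 *	}
 */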